Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ b047857b

History | View | Annotate | Download (153.7 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 7c0d6283 Michael Hanselmann
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    self.__ssh = None

    # every opcode attribute named in _OP_REQP must have been supplied
    for required in self._OP_REQP:
      if getattr(op, required, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   required)

    if not self.REQ_CLUSTER:
      return

    if not cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master_name = sstore.GetMasterNode()
      if utils.HostInfo().name != master_name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master_name)

  def __GetSSH(self):
    """Returns the SshRunner object, created lazily on first access.

    """
    if not self.__ssh:
      self.__ssh = ssh.SshRunner()
    return self.__ssh

  ssh = property(fget=__GetSSH)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in the
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
158 a8083063 Iustin Pop
159 a8083063 Iustin Pop
160 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build an (empty) hooks environment.

    No-op implementation: hook-less LUs export no variables and name
    no nodes to run hooks on.

    """
    # empty env dict, empty pre-hook and post-hook node lists
    return ({}, [], [])
177 a8083063 Iustin Pop
178 a8083063 Iustin Pop
179 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
  """Wrapper around utils.SetEtcHostsEntry.

  Resolves the given host name and registers its IP together with the
  FQDN and the short name in the cluster's hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, host_info.ip, host_info.name,
                         [host_info.ShortName()])
185 9440aeab Michael Hanselmann
186 9440aeab Michael Hanselmann
187 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  Drops both the FQDN and the short name of the given host from the
  cluster's hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  for entry in (host_info.name, host_info.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, entry)
194 c8a0948f Michael Hanselmann
195 c8a0948f Michael Hanselmann
196 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: list of node names; an empty list selects all cluster nodes

  Returns:
    A NiceSort-ed list of fully-expanded node names.

  Raises:
    errors.OpPrereqError: if the argument is not a list or contains
      an unknown node name

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    # no explicit selection: use every node known to the config
    return utils.NiceSort(lu.cfg.GetNodeList())

  wanted = []
  for name in nodes:
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
218 3312b702 Iustin Pop
219 3312b702 Iustin Pop
220 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: list of instance names; an empty list selects every
      instance in the cluster

  Returns:
    A NiceSort-ed list of fully-expanded instance names.

  Raises:
    errors.OpPrereqError: if the argument is not a list or contains
      an unknown instance name

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # no explicit selection: use every instance known to the config
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
242 dcb93971 Michael Hanselmann
243 dcb93971 Michael Hanselmann
244 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
245 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
246 83120a01 Michael Hanselmann

247 83120a01 Michael Hanselmann
  Args:
248 83120a01 Michael Hanselmann
    static: Static fields
249 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
250 83120a01 Michael Hanselmann

251 83120a01 Michael Hanselmann
  """
252 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
253 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
254 dcb93971 Michael Hanselmann
255 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
256 dcb93971 Michael Hanselmann
257 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
258 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
259 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
260 3ecf6786 Iustin Pop
                                          difference(all_fields)))
261 dcb93971 Michael Hanselmann
262 dcb93971 Michael Hanselmann
263 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
264 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
265 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
266 ecb215b5 Michael Hanselmann

267 ecb215b5 Michael Hanselmann
  Args:
268 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
269 396e1b78 Michael Hanselmann
  """
270 396e1b78 Michael Hanselmann
  env = {
271 0e137c28 Iustin Pop
    "OP_TARGET": name,
272 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
273 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
274 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
275 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
276 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
277 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
278 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
279 396e1b78 Michael Hanselmann
  }
280 396e1b78 Michael Hanselmann
281 396e1b78 Michael Hanselmann
  if nics:
282 396e1b78 Michael Hanselmann
    nic_count = len(nics)
283 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
284 396e1b78 Michael Hanselmann
      if ip is None:
285 396e1b78 Michael Hanselmann
        ip = ""
286 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
287 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
288 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
289 396e1b78 Michael Hanselmann
  else:
290 396e1b78 Michael Hanselmann
    nic_count = 0
291 396e1b78 Michael Hanselmann
292 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
293 396e1b78 Michael Hanselmann
294 396e1b78 Michael Hanselmann
  return env
295 396e1b78 Michael Hanselmann
296 396e1b78 Michael Hanselmann
297 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    dict of environment variables, as built by _BuildInstanceHookEnv

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # bugfix: 'status' was wrongly fed from instance.os (a copy-paste
    # of the 'os_type' line above), so INSTANCE_STATUS exported the OS
    # name instead of the instance status
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
317 396e1b78 Michael Hanselmann
318 396e1b78 Michael Hanselmann
319 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Scans the cluster known_hosts file and makes sure exactly one entry
  matches the node's name and IP with the cluster public key: correct
  entries are kept, stale ones (partial name/IP match or wrong key)
  are dropped, and a fresh entry is appended when none was found.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open read/write when the file already exists (so we can append in
  # place), otherwise create it.
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  # inthere: a fully-correct entry for this node already exists
  inthere = False

  # lines to keep / lines to append / whether any stale line was dropped
  save_lines = []
  add_lines = []
  removed = False

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      # known_hosts format: "host1,host2,... keytype key ..."
      fields = parts[0].split(',')
      key = parts[2]

      # haveall: both the IP and the name are listed in this entry;
      # havesome: at least one of the two is.
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # complete and correct entry: keep it and remember we saw it
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # partial or outdated entry for this node: drop it
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    # lines for unrelated hosts (and comments/short lines) are kept
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    # rename over the old file so readers never see a partial file
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
396 a8083063 Iustin Pop
397 a8083063 Iustin Pop
398 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
399 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
400 a8083063 Iustin Pop

401 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
402 a8083063 Iustin Pop
  is the error message.
403 a8083063 Iustin Pop

404 a8083063 Iustin Pop
  """
405 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
406 a8083063 Iustin Pop
  if vgsize is None:
407 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
408 a8083063 Iustin Pop
  elif vgsize < 20480:
409 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
410 191a8385 Guido Trotter
            (vgname, vgsize))
411 a8083063 Iustin Pop
  return None
412 a8083063 Iustin Pop
413 a8083063 Iustin Pop
414 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # back up and remove any pre-existing key pair before regenerating
  for key_path in (priv_key, pub_key):
    if os.path.exists(key_path):
      utils.CreateBackup(key_path)
    utils.RemoveFile(key_path)

  keygen = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if keygen.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             keygen.output)

  # authorize the freshly generated public key for the run-as user
  pub_file = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, pub_file.read(8192))
  finally:
    pub_file.close()
444 a8083063 Iustin Pop
445 a8083063 Iustin Pop
446 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
447 a8083063 Iustin Pop
  """Setup the necessary configuration for the initial node daemon.
448 a8083063 Iustin Pop

449 a8083063 Iustin Pop
  This creates the nodepass file containing the shared password for
450 a8083063 Iustin Pop
  the cluster and also generates the SSL certificate.
451 a8083063 Iustin Pop

452 a8083063 Iustin Pop
  """
453 a8083063 Iustin Pop
  # Create pseudo random password
454 a8083063 Iustin Pop
  randpass = sha.new(os.urandom(64)).hexdigest()
455 a8083063 Iustin Pop
  # and write it into sstore
456 a8083063 Iustin Pop
  ss.SetKey(ss.SS_NODED_PASS, randpass)
457 a8083063 Iustin Pop
458 a8083063 Iustin Pop
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
459 a8083063 Iustin Pop
                         "-days", str(365*5), "-nodes", "-x509",
460 a8083063 Iustin Pop
                         "-keyout", constants.SSL_CERT_FILE,
461 a8083063 Iustin Pop
                         "-out", constants.SSL_CERT_FILE, "-batch"])
462 a8083063 Iustin Pop
  if result.failed:
463 3ecf6786 Iustin Pop
    raise errors.OpExecError("could not generate server ssl cert, command"
464 3ecf6786 Iustin Pop
                             " %s had exitcode %s and error message %s" %
465 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
466 a8083063 Iustin Pop
467 a8083063 Iustin Pop
  os.chmod(constants.SSL_CERT_FILE, 0400)
468 a8083063 Iustin Pop
469 a8083063 Iustin Pop
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
470 a8083063 Iustin Pop
471 a8083063 Iustin Pop
  if result.failed:
472 3ecf6786 Iustin Pop
    raise errors.OpExecError("Could not start the node daemon, command %s"
473 3ecf6786 Iustin Pop
                             " had exitcode %s and error %s" %
474 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
475 a8083063 Iustin Pop
476 a8083063 Iustin Pop
477 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Args:
    instance: the instance whose NIC bridges are verified

  Raises:
    errors.OpPrereqError: if any bridge is missing on the primary node

  """
  # check bridges existance on the node the instance will run on
  bridges = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
487 bf6929a2 Alexander Schreiber
488 bf6929a2 Alexander Schreiber
489 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Validates the parameters of the future cluster (CheckPrereq) and
  then bootstraps the simple store, certificates, ssh setup and the
  cluster configuration on this node (Exec).

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  # the cluster does not exist yet, so skip the usual requirement
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Checks, in order: that no cluster exists yet, the VNC password
    file (for HVM), this host's name/IP sanity, the cluster name/IP,
    the optional secondary IP, the volume group, the MAC prefix, the
    hypervisor type, the master network device and the init.d script.
    Also stores self.hostname, self.clustername and self.secondary_ip
    for later use by Exec.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        # bugfix: the adjacent literals used to concatenate to
        # "VNCpassword"; the separating space was missing
        raise errors.OpPrereqError("Please prepare the cluster VNC"
                                   " password file %s" %
                                   constants.VNC_PASSWORD_FILE)

    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or %s." %
                                 (hostname.ip, constants.ETC_HOSTS))

    # the node daemon port on our resolved IP must be reachable from
    # localhost, otherwise the name resolves to an IP we do not own
    if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
                         source=constants.LOCALHOST_IP_ADDRESS):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # the cluster IP must not be in use yet
    if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                     timeout=5):
      raise errors.OpPrereqError("Cluster IP already active. Aborting.")

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    # when given and different from the primary IP, the secondary IP
    # must also belong to this host
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=constants.LOCALHOST_IP_ADDRESS))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Uses the hostname/clustername/secondary_ip values computed by
    CheckPrereq.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # second whitespace-separated field is the actual key material
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
620 a8083063 Iustin Pop
621 a8083063 Iustin Pop
622 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_node = self.sstore.GetMasterNode()

    node_names = self.cfg.GetNodeList()
    # the cluster is "empty" when the only remaining node is the master
    if node_names != [master_node]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    instance_names = self.cfg.GetInstanceList()
    if instance_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_node = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master_node):
      raise errors.OpExecError("Could not disable the master role")
    # back up the cluster ssh keys before the master leaves the cluster
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_path in (priv_key, pub_key):
      utils.CreateBackup(key_path)
    rpc.call_node_leave_cluster(master_node)
658 a8083063 Iustin Pop
659 a8083063 Iustin Pop
660 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the node (may be empty/false
        on RPC failure)
      node_result: dict of per-check results from call_node_verify
      remote_version: protocol version reported by the node
      feedback_fn: callable taking a single message string

    Returns:
      True if any problem was found on this node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version reply at all means we could not talk to the node;
      # nothing else below can be trusted, so report and bail out
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        # _HasValidVG returns an error message string, or a false value
        # when the VG is fine
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE(review): this loop rebinds 'node' (the method argument) to
        # the names of the unreachable peers — confirm that is intentional;
        # the argument is not used again after this point
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Args:
      instance: instance name to verify
      node_vol_is: dict of node -> actually-present volumes
      node_instance: dict of node -> list of running instance names
      feedback_fn: callable taking a single message string

    Returns:
      True if any problem was found for this instance, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    # volumes this instance should have, grouped by node
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # a non-'down' instance must actually be running on its primary node
    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # ... and must not be running anywhere else
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns 0 if no problem was found, 1 otherwise (int(bad)).

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}    # node -> dict of volumes actually present
    node_instance = {}  # node -> list of running instance names

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all remote data up front, one RPC fan-out per data kind
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result is an LVM error message from the node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    # volumes every instance should have, accumulated across all instances
    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
907 a8083063 Iustin Pop
908 a8083063 Iustin Pop
909 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a 4-element tuple:
      - list of nodes which could not be contacted (or returned
        invalid volume data)
      - dict of node name -> LVM error message, for nodes where the
        LV enumeration itself failed
      - list of names of instances having at least one offline
        (not online) logical volume
      - dict of instance name -> list of (node, volume) pairs for
        logical volumes that are missing entirely

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map of (node, volume) -> owning instance, for every volume that
    # should exist for a running, network-mirrored instance
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # nothing to check, all result components are empty
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # the node returned an LVM error message instead of volume data;
        # record it and skip this node (the original code fell through
        # and tried to iterate the error string as a dict, which raised
        # AttributeError)
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        # remove seen volumes from the expected set as we go; whatever
        # is left afterwards is missing
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
979 2c95a8d4 Iustin Pop
980 2c95a8d4 Iustin Pop
981 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, with the old cluster name
    as the target and the new name in the environment.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    # save the resolved IP for Exec
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # fping exits with failure when the host does NOT answer, so a
      # non-failed result means the address is already in use
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      # the master already has the updated files locally
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # distribution failures are logged but do not abort the rename
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restart the master IP, whether or not the
      # distribution above succeeded
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1058 07bd8a51 Iustin Pop
1059 07bd8a51 Iustin Pop
1060 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance whose disks are polled on its primary node
    proc: object providing LogInfo/LogWarning for user feedback
    oneshot: if True, report the current status once instead of waiting
      for the sync to finish
    unlock: if True, release the 'cmd' lock while sleeping between polls

  Returns:
    True if no disk ended up degraded, False otherwise.

  Raises:
    errors.RemoteError: if the primary node returns no mirror data for
      10 consecutive polls.

  """
  if not instance.disks:
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    # a successful poll resets the consecutive-failure counter
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      # perc_done is None once this device has finished syncing
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep until the next poll, optionally releasing the command lock
    # so other operations may proceed meanwhile
    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1122 a8083063 Iustin Pop
1123 a8083063 Iustin Pop
1124 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  Returns True when the device (and, recursively, its children) shows
  no problem, False otherwise.

  """
  cfgw.SetDiskID(dev, node)
  # select which status field of the blockdev_find result to test
  if ldisk:
    status_idx = 6
  else:
    status_idx = 5

  healthy = True
  if on_primary or dev.AssembleOnSecondary():
    dev_stats = rpc.call_blockdev_find(node, dev)
    if not dev_stats:
      logger.ToStderr("Can't get any data from node %s" % node)
      healthy = False
    elif dev_stats[status_idx]:
      healthy = False
  # recurse into child devices (always with the default is_degraded test)
  for child in (dev.children or []):
    healthy = healthy and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return healthy
1151 a8083063 Iustin Pop
1152 a8083063 Iustin Pop
1153 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_names = self.cfg.GetNodeList()
    os_data = rpc.call_os_diagnose(node_names)
    # a False reply (as opposed to a data structure) signals RPC failure
    if os_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return os_data
1176 a8083063 Iustin Pop
1177 a8083063 Iustin Pop
1178 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed should not run its own removal hooks
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use the call-style raise, consistent with the rest of the file
      # (the old "raise Class, args" form is deprecated)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still uses the node, either as
    # primary or as secondary
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # canonicalize the name for Exec and the hooks environment
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Stops the node daemon, removes the node from the configuration and
    from /etc/hosts.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    self.ssh.Run(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
1251 c8a0948f Michael Hanselmann
1252 a8083063 Iustin Pop
1253 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields which need a live RPC query to the nodes (memory/disk data)
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per node, each row holding the values
    of the requested output fields in order.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    # only contact the nodes if a live field was actually requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          live_data[name] = {}
    else:
      # give each node its own empty dict; dict.fromkeys(nodenames, {})
      # would make all the nodes share a single mutable dict
      live_data = dict([(name, {}) for name in nodenames])

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    # the instance scan is only needed for the instance-related fields
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          # live data may be missing for unreachable nodes
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1349 a8083063 Iustin Pop
1350 a8083063 Iustin Pop
1351 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns one output row per logical volume found on the queried
    nodes, holding the requested fields in order.

    """
    wanted_nodes = self.nodes
    volumes = rpc.call_node_volumes(wanted_nodes)

    # full instance objects are needed for the LV-to-instance mapping
    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    lv_map = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node_name in wanted_nodes:
      # skip nodes which returned no volume data (e.g. unreachable)
      if node_name not in volumes or not volumes[node_name]:
        continue

      vol_list = sorted(volumes[node_name], key=lambda item: item['dev'])

      for vol in vol_list:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            value = node_name
          elif field == "phys":
            value = vol['dev']
          elif field == "vg":
            value = vol['vg']
          elif field == "name":
            value = vol['name']
          elif field == "size":
            value = int(float(vol['size']))
          elif field == "instance":
            # find the instance (if any) owning this LV on this node
            value = '-'
            for inst in instances:
              if node_name not in lv_map[inst]:
                continue
              if vol['name'] in lv_map[inst][node_name]:
                value = inst.name
                break
          else:
            raise errors.ParameterError(field)
          row.append(str(value))

        output.append(row)

    return output
1419 dcb93971 Michael Hanselmann
1420 dcb93971 Michael Hanselmann
1421 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    # post hooks also run on the node being added
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    # a single-homed node uses its primary IP as the secondary one
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # neither of the new node's IPs may already be in use by another node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(primary_ip, constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                           source=myself.secondary_ip):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

    # HVM clusters need the shared VNC password file to exist so it can
    # be copied to the new node
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
                                   constants.VNC_PASSWORD_FILE)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Pushes the node daemon password and SSL certificate to the node,
    restarts its node daemon, verifies connectivity and hostname,
    distributes the ssh keys and cluster files, and finally registers
    the node in the configuration.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is embedded in a shell command below, so restrict it
    # to a safe character set
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = self.ssh.Run(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # give the node daemon some time to come up after the restart
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host keys plus the cluster ssh user's key pair, in the order
    # expected by call_node_add below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _AddHostToEtcHosts(new_node.name)

    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    # for dual-homed nodes, verify the node itself can reach its
    # declared secondary IP
    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    success, msg = self.ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s."
                               " Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    # the master already has the up-to-date files
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # distribution failures are logged but not fatal
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      if not self.ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1643 a8083063 Iustin Pop
1644 a8083063 Iustin Pop
1645 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  # must be runnable on a node which is not (yet) the master
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    # the new master is the node this LU is running on
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be."
                                 " %s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    # best effort: the old master may be unreachable (a common reason
    # for failing over), so a failure here is only logged
    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # record the new master in the simple store and push the updated
    # file to all nodes
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,"
                  " please fix manually.")
1713 a8083063 Iustin Pop
1714 a8083063 Iustin Pop
1715 a8083063 Iustin Pop
1716 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    # static description assembled from the simple store, the compiled-in
    # version constants and the local platform information
    arch_bits = platform.architecture()[0]
    arch_cpu = platform.machine()
    return {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (arch_bits, arch_cpu),
      }
1745 a8083063 Iustin Pop
1746 a8083063 Iustin Pop
1747 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    The file is copied to every node computed in CheckPrereq, except
    the node we run on ourselves; individual copy failures are logged
    but do not abort the operation.

    """
    src_file = self.op.filename
    local_name = utils.HostInfo().name

    for node_name in self.nodes:
      # copying onto ourselves would be pointless
      if node_name == local_name:
        continue
      if not self.ssh.CopyFileToNode(node_name, src_file):
        logger.Error("Copy of file %s to node %s failed" %
                     (src_file, node_name))
1784 a8083063 Iustin Pop
1785 a8083063 Iustin Pop
1786 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # the config writer knows how to serialize itself; just delegate
    return self.cfg.DumpConfig()
1803 a8083063 Iustin Pop
1804 a8083063 Iustin Pop
1805 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    Returns a list of (node, command output, exit code) tuples, one
    entry per target node.

    """
    results = []
    for node_name in self.nodes:
      cmd_result = self.ssh.Run(node_name, "root", self.op.command)
      results.append((node_name, cmd_result.output, cmd_result.exit_code))

    return results
1829 a8083063 Iustin Pop
1830 a8083063 Iustin Pop
1831 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance


  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
1860 a8083063 Iustin Pop
1861 a8083063 Iustin Pop
1862 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    A (disks_ok, device_info) tuple: disks_ok is False if the operation
    failed, device_info is the list of (host, instance_visible_name,
    node_visible_name) mappings for the primary node's devices.

  """
  device_info = []
  disks_ok = True
  inst_name = instance.name
  primary = instance.primary_node

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # pass 1: assemble every device on every node in secondary mode
  for disk in instance.disks:
    for node_name, node_dev in disk.ComputeNodeTree(primary):
      cfg.SetDiskID(node_dev, node_name)
      result = rpc.call_blockdev_assemble(node_name, node_dev,
                                          inst_name, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" %
                     (disk.iv_name, node_name))
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # pass 2: re-assemble as primary, but only on the primary node
  for disk in instance.disks:
    for node_name, node_dev in disk.ComputeNodeTree(primary):
      if node_name != primary:
        continue
      cfg.SetDiskID(node_dev, node_name)
      result = rpc.call_blockdev_assemble(node_name, node_dev,
                                          inst_name, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" %
                     (disk.iv_name, node_name))
        disks_ok = False
    device_info.append((primary, disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, primary)

  return disks_ok, device_info
1922 a8083063 Iustin Pop
1923 a8083063 Iustin Pop
1924 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  Assembles all the instance's disks; on failure the already-assembled
  devices are shut down again and OpExecError is raised.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back whatever was brought up before reporting the failure
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1936 fe7b0351 Michael Hanselmann
1937 fe7b0351 Michael Hanselmann
1938 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    Refuses to run if the instance is still active on its primary node,
    otherwise shuts down its block devices everywhere.

    """
    instance = self.instance
    # ask the primary node for its running instances; on RPC failure
    # the per-node result is not a list
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1973 a8083063 Iustin Pop
1974 a8083063 Iustin Pop
1975 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node do not affect
  the return value (errors on other nodes always do); the original
  docstring had this condition inverted.

  Returns:
    True if every block device could be shut down (modulo the
    ignore_primary exception above), False otherwise.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # a primary-node failure is only forgiven when ignore_primary
        # is set
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1994 a8083063 Iustin Pop
1995 a8083063 Iustin Pop
1996 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  node_data = rpc.call_node_info([node], cfg.GetVGName())
  if not node_data or not isinstance(node_data, dict):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                               " information" % (node,))

  free_mem = node_data[node].get('memory_free')
  if not isinstance(free_mem, int):
    # the node answered, but not with a usable memory figure
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
2024 d4f16fd9 Iustin Pop
2025 d4f16fd9 Iustin Pop
2026 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    # make sure the primary node can actually hold the instance
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
                         "starting instance %s" % instance.name,
                         instance.memory)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    pnode = instance.primary_node

    # disks first; a failure inside raises and aborts the start
    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(pnode, instance, extra_args):
      # clean up the already-activated disks before failing
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
2087 a8083063 Iustin Pop
2088 a8083063 Iustin Pop
2089 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"IGNORE_SECONDARIES": self.op.ignore_secondaries}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    pnode = instance.primary_node

    valid_types = [constants.INSTANCE_REBOOT_SOFT,
                   constants.INSTANCE_REBOOT_HARD,
                   constants.INSTANCE_REBOOT_FULL]
    if reboot_type not in valid_types:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type == constants.INSTANCE_REBOOT_FULL:
      # full reboot: shut the instance down completely, recycle its
      # disks and start it from scratch
      if not rpc.call_instance_shutdown(pnode, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(pnode, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")
    else:
      # soft/hard reboots are delegated entirely to the hypervisor
      if not rpc.call_instance_reboot(pnode, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")

    self.cfg.MarkInstanceUp(instance.name)
2163 bf6929a2 Alexander Schreiber
2164 bf6929a2 Alexander Schreiber
2165 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    pnode = instance.primary_node
    # a failed shutdown is only logged; we still mark the instance
    # down and release its block devices
    if not rpc.call_instance_shutdown(pnode, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2208 a8083063 Iustin Pop
2209 a8083063 Iustin Pop
2210 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the config may say "down" while the node still runs it, so ask
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # note: the reinstall opcode has no 'pnode' attribute, so the
        # message must use the instance's primary node name
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # record the new OS before running the create scripts
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always release the disks, even if the OS install failed
      _ShutdownInstanceDisks(inst, self.cfg)
2287 fe7b0351 Michael Hanselmann
2288 fe7b0351 Michael Hanselmann
2289 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # also make sure the instance is not actually running on its
    # primary node, regardless of the configured state
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      # FIX: this previously used the undefined name 'instance_name',
      # raising a NameError instead of the intended OpPrereqError
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      # make sure the new name's IP is not already live on the network
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # the config rename already happened, so only log the failure
        # of the OS-level rename script
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2369 decd5f45 Iustin Pop
2370 decd5f45 Iustin Pop
2371 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    hook_env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode()]
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    inst = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (inst.name, inst.primary_node))

    if not rpc.call_instance_shutdown(inst.primary_node, inst):
      # a failed shutdown is fatal unless the caller asked to ignore it
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (inst.name, inst.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % inst.name)

    if not _RemoveDisks(inst, self.cfg):
      # likewise, disk-removal failures may be ignored on request
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % inst.name)

    self.cfg.RemoveInstance(inst.name)
2428 a8083063 Iustin Pop
2429 a8083063 Iustin Pop
2430 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # these fields need live data from the nodes, not just the
    # configuration, and are handled separately in Exec()
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # expand self.op.names into the final list of instance names
    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    Returns one row per instance, each row holding the values of
    self.op.output_fields in order.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # nodes whose live query failed; instances on them get None or
    # "ERROR_nodedown" values for the dynamic fields below
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one dynamic field was requested, so do the (more
      # expensive) live RPC query against the involved primary nodes
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # False (as opposed to an empty dict) means the RPC itself
          # failed, i.e. the node is unreachable
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # no dynamic fields requested: fake empty live data per instance
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True when the configuration says the instance should be up
          val = (instance.status != "down")
        elif field == "oper_state":
          # live (observed) state; None when the node query failed
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin + operational state
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          # live memory usage; None for dead nodes, "-" when the
          # instance is simply not running
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk name, "sda" or "sdb"
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2545 a8083063 Iustin Pop
2546 a8083063 Iustin Pop
2547 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # failover only makes sense for network-mirrored disk templates
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # ignore_consistency also covers an unreachable/dead source node:
      # proceed with the failover anyway, only logging the problem
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # from this point on, the instance belongs to the target node
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      # roll back disk activation before aborting
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2659 a8083063 Iustin Pop
2660 a8083063 Iustin Pop
2661 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  """
  # depth-first: every child must exist before its parent device
  for child in device.children or []:
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  # remember the physical id the node assigned, unless already known
  if device.physical_id is None:
    device.physical_id = new_id
  return True
  return True
2680 a8083063 Iustin Pop
2681 a8083063 Iustin Pop
2682 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
2683 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2684 a8083063 Iustin Pop

2685 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2686 a8083063 Iustin Pop
  all its children.
2687 a8083063 Iustin Pop

2688 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2689 a8083063 Iustin Pop

2690 a8083063 Iustin Pop
  """
2691 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2692 a8083063 Iustin Pop
    force = True
2693 a8083063 Iustin Pop
  if device.children:
2694 a8083063 Iustin Pop
    for child in device.children:
2695 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, instance,
2696 3f78eef2 Iustin Pop
                                        child, force, info):
2697 a8083063 Iustin Pop
        return False
2698 a8083063 Iustin Pop
2699 a8083063 Iustin Pop
  if not force:
2700 a8083063 Iustin Pop
    return True
2701 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2702 3f78eef2 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size,
2703 3f78eef2 Iustin Pop
                                    instance.name, False, info)
2704 a8083063 Iustin Pop
  if not new_id:
2705 a8083063 Iustin Pop
    return False
2706 a8083063 Iustin Pop
  if device.physical_id is None:
2707 a8083063 Iustin Pop
    device.physical_id = new_id
2708 a8083063 Iustin Pop
  return True
2709 a8083063 Iustin Pop
2710 a8083063 Iustin Pop
2711 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  # one fresh cluster-unique ID per requested extension
  return ["%s%s" % (cfg.GenerateUniqueID(), ext) for ext in exts]
2722 923b1523 Iustin Pop
2723 923b1523 Iustin Pop
2724 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd device complete with its children.

  The children are the data LV and a fixed-size (128MB) metadata LV.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv])
2738 a8083063 Iustin Pop
2739 a8083063 Iustin Pop
2740 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  The children are the data LV and a fixed-size (128MB) metadata LV.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
2755 a1f445d3 Iustin Pop
2756 7c0d6283 Michael Hanselmann
2757 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of top-level Disk objects ("sda" and "sdb") for the
  instance, or an empty list for the diskless template.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()

  if template_name == constants.DT_DISKLESS:
    disks = []
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    disks = [
      objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                   logical_id=(vgname, names[0]), iv_name="sda"),
      objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                   logical_id=(vgname, names[1]), iv_name="sdb"),
      ]
  elif template_name == constants.DT_LOCAL_RAID1:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # two mirror halves per disk, all on the primary node
    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_halves = [objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                               logical_id=(vgname, lv))
                  for lv in names[0:2]]
    md_sda = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                          size=disk_sz, children=sda_halves)
    sdb_halves = [objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                               logical_id=(vgname, lv))
                  for lv in names[2:4]]
    md_sdb = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                          size=swap_sz, children=sdb_halves)
    disks = [md_sda, md_sdb]
  elif template_name == constants.DT_REMOTE_RAID1:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    # each disk is an md device on top of a drbd7 mirror
    drbd_sda = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                     disk_sz, names[0:2])
    md_sda = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                          children=[drbd_sda], size=disk_sz)
    drbd_sdb = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                     swap_sz, names[2:4])
    md_sdb = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                          children=[drbd_sdb], size=swap_sz)
    disks = [md_sda, md_sdb]
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    # drbd8 devices are used directly, without an md layer
    disks = [_GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                  disk_sz, names[0:2], "sda"),
             _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                  swap_sz, names[2:4], "sdb")]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2831 a8083063 Iustin Pop
2832 a8083063 Iustin Pop
2833 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2834 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2835 3ecf6786 Iustin Pop

2836 3ecf6786 Iustin Pop
  """
2837 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2838 a0c3fea1 Michael Hanselmann
2839 a0c3fea1 Michael Hanselmann
2840 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create every block device of the given instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    # devices are brought up on all secondaries first, then on the
    # primary; any failure aborts the whole creation
    #HARDCODE
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(cfg, snode, instance,
                                           disk, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                       instance, disk, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False
  return True
2871 a8083063 Iustin Pop
2872 a8083063 Iustin Pop
2873 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Tear down all block devices belonging to an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for device in instance.disks:
    # walk the whole device tree (primary and secondaries) and try to
    # remove every component, remembering any failure
    node_tree = device.ComputeNodeTree(instance.primary_node)
    for node, disk in node_tree:
      cfg.SetDiskID(disk, node)
      removed = rpc.call_blockdev_remove(node, disk)
      if not removed:
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_removed = False
  return all_removed
2900 a8083063 Iustin Pop
2901 a8083063 Iustin Pop
2902 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Handles both fresh OS installs (INSTANCE_CREATE) and restores from
  an export (INSTANCE_IMPORT).

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present; optional ones (ip, bridge,
  # snode, src_node, kernel_path, ...) are filled in during CheckPrereq
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    # import mode exposes the export source to the hooks as well
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    # relies on self.secondaries/self.instance_status/self.inst_ip
    # having been computed by CheckPrereq
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the opcode parameters, expands short node/instance names
    to full ones (mutating self.op in place) and computes the derived
    attributes (self.pnode, self.secondaries, self.inst_ip,
    self.instance_status, and self.src_image for imports) that
    BuildHooksEnv and Exec rely on.

    """
    # default the optional attributes so later code can test them
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # only single-disk exports can be imported here
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: None,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    # Check lv size requirements
    if req_size is not None:
      # verify free VG space on the primary and all secondaries
      nodenames = [pnode.name] + self.secondaries
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % nodeinfo)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    if self.op.kernel_path == constants.VALUE_NONE:
      raise errors.OpPrereqError("Can't set instance kernel to none")

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    # canonicalize the instance name via its resolved host name
    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # ip can be absent, "none", "auto" or a literal address
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means the address is already taken by someone
      if utils.TcpPing(hostname1.ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # boot order verification
    if self.op.hvm_boot_order is not None:
      # the order may only consist of the characters a, c, d, n
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
        raise errors.OpPrereqError("invalid boot order specified,"
                                   " must be one or more of [acdn]")

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    optionally waits for disk sync, runs the OS create/import scripts
    and finally starts the instance if requested.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisors need a network port reserved (e.g. for a console)
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back whatever was created before failing
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disks and the config entry
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3221 a8083063 Iustin Pop
3222 a8083063 Iustin Pop
3223 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    pnode = instance.primary_node

    # make sure the instance is actually running on its primary node
    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if instance.name not in running:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(instance)

    # build ssh cmdline
    ssh_cmd = self.ssh.BuildCmd(pnode, "root", console_cmd, batch=True,
                                tty=True)
    return ssh_cmd[0], ssh_cmd
3268 a8083063 Iustin Pop
3269 a8083063 Iustin Pop
3270 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Only valid for instances using the remote_raid1 disk template: a new
  DRBD branch is created on the given remote node and attached to the
  md device on the primary.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # find the disk to mirror; the for/else raises if none matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave devices."
                                 " This would create a 3-disk raid1 which we"
                                 " don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component

    Creates the new DRBD branch on the secondary first, then on the
    primary, then attaches it to the md device; on any failure the
    already-created devices are rolled back.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # attach failed: best-effort removal of the device on both nodes
      logger.Error("Can't add mirror compoment to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    disk.children.append(new_drbd)

    # persist the new disk layout in the cluster configuration
    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3382 a8083063 Iustin Pop
3383 a8083063 Iustin Pop
3384 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  Detaches one drbd7 child (identified by the opcode's disk_name plus
  the drbd network port given in disk_id) from an md mirror device and
  then deletes the backing block device on both nodes of the drbd pair.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # parameters the opcode must carry for this LU
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      # self.old_secondary is computed by CheckPrereq, which the LU
      # framework runs before the hooks environment is built
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that it uses the
    remote_raid1 disk template and that the requested component exists
    and is not the last one in the mirror.  On success it stores the
    instance, the md disk, the drbd child and the old secondary node
    on self for later use by BuildHooksEnv and Exec.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the md disk by iv_name; the for/else raises when no disk
    # matched (i.e. the loop finished without a break)
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # locate the drbd7 child by its network port (logical_id[2])
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0] and [1] are the two nodes of the drbd pair; pick
    # the one that is not the primary as the "old secondary"
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    Detaches the child from the md device on the primary node, then
    best-effort removes the underlying block device on both nodes of
    the drbd pair, and finally writes the updated instance back to the
    configuration.

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # remove the physical device on both nodes of the drbd pair;
    # failures here are only logged, not fatal
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)
3469 a8083063 Iustin Pop
3470 a8083063 Iustin Pop
3471 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3472 a8083063 Iustin Pop
  """Replace the disks of an instance.
3473 a8083063 Iustin Pop

3474 a8083063 Iustin Pop
  """
3475 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3476 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3477 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3478 a8083063 Iustin Pop
3479 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build the hooks environment for a replace-disks operation.

    The hooks run on the master and the instance's primary node; the
    requested new secondary is added to the node lists when one was
    given in the opcode.

    """
    replace_env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # instance-wide variables are merged on top of the
    # replacement-specific ones, matching the other LUs in this file
    replace_env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return replace_env, node_list, node_list
3498 a8083063 Iustin Pop
3499 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and normalizes the
    replacement mode and target node.  On success it stores the
    instance, the current secondary node and (for drbd8) the
    target/other/new node attributes on self for use by the _Exec*
    methods.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # canonicalize the opcode's instance name to the expanded form
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # remote_node is an optional opcode attribute, hence the getattr
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode for drbd7
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # the primary is the node whose disks get replaced; the
        # secondary is only used for consistency checks
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # every requested disk must exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # write back the (possibly cleared/expanded) remote node
    self.op.remote_node = remote_node
3579 a8083063 Iustin Pop
3580 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance.

    Handles the remote_raid1 template: for every disk a new drbd
    branch is created on the primary and the (possibly new) secondary,
    added to the md mirror, synced, and only then is the old child
    detached and its devices removed.

    """
    instance = self.instance
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      # no new secondary requested: rebuild the mirror on the current one
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      # remember (md device, old child, new child) for the later
      # sync-check and cleanup passes
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on secondary"
                                 " node %s. Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!"
                                 " Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        # NOTE(review): "compoment" is a pre-existing typo in this log message
        logger.Error("Can't add mirror compoment to md!")
        # roll back the freshly created devices on both nodes
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      # persist the config after each disk so a crash loses at most one
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # field 5 of the blockdev_find result is the "degraded" flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything is in sync: detach and delete the old components
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3674 a8083063 Iustin Pop
3675 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    # short aliases for the processor's progress/logging helpers
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node: where LVs get replaced; oth_node: the drbd peer,
    # both set by CheckPrereq
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      # only operate on the disks requested in the opcode
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # fixed-size drbd metadata volume (128, in the units of Disk.size
      # — presumably MiB; confirm against objects.Disk)
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      # maps a disk to its "_replaced-<suffix>" (vg, lv) rename target
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the config objects to match the on-node renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # NOTE(review): the "%s" placeholder has no matching
            # argument here — the device name is never interpolated;
            # verify LogWarning's signature before fixing
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # field 5 of the blockdev_find result is the "degraded" flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # best-effort: a failed removal only produces a warning
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
3844 a9e0c397 Iustin Pop
3845 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    Note that only steps 1 and 2 honour the self.op.disks filter; from
    step 3 on *all* instance disks are processed, since replacing the
    secondary node affects every disk of the instance.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      # ldisk=True: we require the *local* disk on the primary to be
      # healthy, since it will be the only good copy during the switch
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      # remember the old LVs so step 6 can delete them on the old node
      iv_names[dev.iv_name] = (dev, dev.children)

    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev in instance.disks:
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                      _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      if not rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the physical (unique in bdev terms) id to None, meaning
      # detach from network
      dev.physical_id = (None,) * len(dev.physical_id)
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev in instance.disks:
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                "please do a gnt-instance info to see the status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        if not rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
4008 a9e0c397 Iustin Pop
4009 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Selects the handler matching the instance's disk template (and, for
    drbd8, whether a replacement secondary node was requested) and
    delegates the actual work to it.

    """
    template = self.instance.disk_template
    if template == constants.DT_REMOTE_RAID1:
      handler = self._ExecRR1
    elif template == constants.DT_DRBD8:
      if self.op.remote_node is not None:
        handler = self._ExecD8Secondary
      else:
        handler = self._ExecD8DiskOnly
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return handler(feedback_fn)
4026 a9e0c397 Iustin Pop
4027 a8083063 Iustin Pop
4028 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns, per instance, its configured/runtime state plus a recursive
  per-disk status tree (see _ComputeDiskStatus).

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    Sets self.wanted_instances to the resolved instance objects; an
    empty op.instances list means "all instances".

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        self.wanted_instances.append(instance)
    else:
      # no names given: query every instance in the cluster
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return


  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing `dev` (ids, primary/secondary status from
    rpc.call_blockdev_find, recursively computed children).

    NOTE(review): SetDiskID is re-applied before every per-node
    blockdev_find call — presumably it rewrites the device's physical
    id for that node; the call order here is therefore significant.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      # no secondary node (e.g. plain LVs): no secondary status
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data.

    Returns a dict mapping instance name to a dict of configured and
    runtime attributes, including the per-disk status trees.

    """
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      # presence of a "state" key in the RPC answer is used as the
      # "instance is running" signal
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "network_port": instance.network_port,
        "vcpus": instance.vcpus,
        "kernel_path": instance.kernel_path,
        "initrd_path": instance.initrd_path,
        "hvm_boot_order": instance.hvm_boot_order,
        }

      result[instance.name] = idict

    return result
4131 a8083063 Iustin Pop
4132 a8083063 Iustin Pop
4133 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
  """Modifies an instances's parameters.

  Supported parameters (all optional on the opcode): mem, vcpus, ip,
  mac, bridge, kernel_path, initrd_path, hvm_boot_order.  Changes are
  written to the configuration only; they take effect at the next
  instance restart.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    Overrides in the hook environment only the parameters actually
    being changed; for the NIC, unchanged fields are filled in from the
    instance's current first NIC.

    """
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge or self.mac:
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    Also normalizes and validates each submitted parameter and sets the
    do_ip/do_bridge/do_kernel_path/do_initrd_path flags used by Exec.

    """
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order]
    # at least one parameter must have been submitted
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the special value "none" (case-insensitive) clears the IP
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    # NOTE(review): uniqueness is checked before format validity, so a
    # malformed MAC that happened to collide would be reported as
    # "already in use" first
    if self.mac is not None:
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      # "none" and "default" are accepted as-is; anything else must be
      # an absolute path
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" %
                                 self.op.instance_name)
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    Returns a list of (parameter, new_value) pairs for the changes
    actually applied.

    NOTE(review): the truthiness checks below mean a submitted value of
    0 for mem/vcpus is silently ignored rather than rejected.
    """
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # "default" resets the boot order to unset (None)
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))

    self.cfg.AddInstance(instance)

    return result
4293 a8083063 Iustin Pop
4294 a8083063 Iustin Pop
4295 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # an absent "nodes" attribute on the opcode is passed on as None
    requested = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, requested)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    node_list = self.nodes
    return rpc.call_export_list(node_list)
4317 a8083063 Iustin Pop
4318 a8083063 Iustin Pop
4319 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  Snapshots the instance's disk, copies the snapshot to the target
  node, finalizes the export there and removes any older exports of the
  same instance from other nodes.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    Also resolves the target node and stores instance/dst_node on self.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    NOTE(review): only the disk named "sda" is snapshotted and
    exported; any other disks are silently skipped.  Snapshot, export
    and cleanup failures are only logged, not raised.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            # describe the snapshot as a plain LV so it can be exported
            # and removed below
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if
      # we were the ones who shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      # the snapshot is removed even if the export above failed
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4431 5c947f38 Iustin Pop
4432 5c947f38 Iustin Pop
4433 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Abstract parent for all tag-handling LUs.

  It provides the common prerequisite checking: resolving the operation's
  (kind, name) pair into ``self.target``, the object whose tags will be
  read or modified.

  """
  def CheckPrereq(self):
    """Resolve self.op.kind/self.op.name into self.target.

    Raises OpPrereqError for an unknown kind or an unresolvable name.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      # the cluster has exactly one taggable object, no name lookup needed
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      # store the canonical name back so later phases see it
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4462 5c947f38 Iustin Pop
4463 5c947f38 Iustin Pop
4464 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Return the tags of a single cluster object.

  The target object is resolved by TagsLU.CheckPrereq from the
  (kind, name) pair in the opcode.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Fetch and return the target object's tag set.

    """
    tags = self.target.GetTags()
    return tags
4475 5c947f38 Iustin Pop
4476 5c947f38 Iustin Pop
4477 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4478 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4479 73415719 Iustin Pop

4480 73415719 Iustin Pop
  """
4481 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4482 73415719 Iustin Pop
4483 73415719 Iustin Pop
  def CheckPrereq(self):
4484 73415719 Iustin Pop
    """Check prerequisites.
4485 73415719 Iustin Pop

4486 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4487 73415719 Iustin Pop

4488 73415719 Iustin Pop
    """
4489 73415719 Iustin Pop
    try:
4490 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4491 73415719 Iustin Pop
    except re.error, err:
4492 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4493 73415719 Iustin Pop
                                 (self.op.pattern, err))
4494 73415719 Iustin Pop
4495 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4496 73415719 Iustin Pop
    """Returns the tag list.
4497 73415719 Iustin Pop

4498 73415719 Iustin Pop
    """
4499 73415719 Iustin Pop
    cfg = self.cfg
4500 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4501 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4502 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4503 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4504 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4505 73415719 Iustin Pop
    results = []
4506 73415719 Iustin Pop
    for path, target in tgts:
4507 73415719 Iustin Pop
      for tag in target.GetTags():
4508 73415719 Iustin Pop
        if self.re.search(tag):
4509 73415719 Iustin Pop
          results.append((path, tag))
4510 73415719 Iustin Pop
    return results
4511 73415719 Iustin Pop
4512 73415719 Iustin Pop
4513 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4514 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4515 5c947f38 Iustin Pop

4516 5c947f38 Iustin Pop
  """
4517 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4518 5c947f38 Iustin Pop
4519 5c947f38 Iustin Pop
  def CheckPrereq(self):
4520 5c947f38 Iustin Pop
    """Check prerequisites.
4521 5c947f38 Iustin Pop

4522 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4523 5c947f38 Iustin Pop

4524 5c947f38 Iustin Pop
    """
4525 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4526 f27302fa Iustin Pop
    for tag in self.op.tags:
4527 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4528 5c947f38 Iustin Pop
4529 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4530 5c947f38 Iustin Pop
    """Sets the tag.
4531 5c947f38 Iustin Pop

4532 5c947f38 Iustin Pop
    """
4533 5c947f38 Iustin Pop
    try:
4534 f27302fa Iustin Pop
      for tag in self.op.tags:
4535 f27302fa Iustin Pop
        self.target.AddTag(tag)
4536 5c947f38 Iustin Pop
    except errors.TagError, err:
4537 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4538 5c947f38 Iustin Pop
    try:
4539 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4540 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4541 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4542 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4543 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4544 5c947f38 Iustin Pop
4545 5c947f38 Iustin Pop
4546 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    All requested tags must be valid and currently present on the
    target object, otherwise the operation is rejected.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    wanted = frozenset(self.op.tags)
    missing = wanted - self.target.GetTags()
    if missing:
      # report the absent tags in a stable (sorted) order
      quoted = ["'%s'" % del_tag for del_tag in missing]
      quoted.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(quoted)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    # persist the modified object; a concurrent config change means the
    # whole operation must be retried by the caller
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4582 06009e27 Iustin Pop
4583 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This testing LU sleeps on the master and/or on a given set of nodes,
  for the requested duration.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    Expands and validates the node list, when one was supplied.

    """
    if self.op.on_nodes:
      # _GetWantedNodes raises OpPrereqError on invalid names
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if not self.op.on_nodes:
      return
    # fan the delay out to the remote nodes and check each answer
    result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
    if not result:
      raise errors.OpExecError("Complete failure from rpc call")
    for node, node_result in result.items():
      if not node_result:
        raise errors.OpExecError("Failure during rpc call to node %s,"
                                 " result: %s" % (node, node_result))