Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ b63ed789

History | View | Annotate | Download (147.8 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  A Logical Unit is one master-side operation.  Subclasses must:
    - implement CheckPrereq, which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None          # hooks path; None means no hooks are run
  HTYPE = None          # hooks type (e.g. cluster/node/instance)
  _OP_REQP = []         # opcode attributes that must not be None
  REQ_CLUSTER = True    # whether an initialized cluster is required
  REQ_MASTER = True     # whether we must run on the master node

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore

    # every attribute named in _OP_REQP must be present on the opcode
    for attr_name in self._OP_REQP:
      if getattr(op, attr_name, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)

    if not self.REQ_CLUSTER:
      return
    if not cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if master != utils.HostInfo().name:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled.  It can do internode communication, but
    it must be idempotent - no cluster or system changes are allowed.

    It should raise errors.OpPrereqError when something is not
    fulfilled; its return value is ignored.

    This method should also canonicalize the opcode parameters, e.g. a
    short node name must be fully expanded after this method completes
    successfully (so that hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work.  It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Returns a three-element tuple:
      - a dict with the environment for the LU's hook; the keys must
        not carry the 'GANETI_' prefix (the hooks runner adds it, plus
        keys of its own); use an empty dict, never None
      - the list of node names on which to run the pre-execution hook
      - the list of node names on which to run the post-execution hook

    The master node should not be listed in either node list, as the
    hooks runner adds it itself when this LU requires a cluster to run
    on (otherwise we don't have a node list).  Use empty lists, never
    None.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
145 a8083063 Iustin Pop
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  Intended as a parent for LogicalUnits that run no hooks, so the
  empty BuildHooksEnv does not have to be duplicated in each of them.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    No-op: returns an empty environment and empty pre/post node lists,
    since no hooks are run.

    """
    return {}, [], []
164 a8083063 Iustin Pop
165 a8083063 Iustin Pop
166 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
  """Wrapper around utils.SetEtcHostsEntry.

  Resolves `hostname` and registers its IP under both the full and the
  short name in the system hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, host_info.ip, host_info.name,
                         [host_info.ShortName()])
172 9440aeab Michael Hanselmann
173 9440aeab Michael Hanselmann
174 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  Drops both the full and the short name of `hostname` from the system
  hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  for entry in (host_info.name, host_info.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, entry)
181 c8a0948f Michael Hanselmann
182 c8a0948f Michael Hanselmann
183 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the logical unit on whose behalf we check (its cfg is used)
    nodes: List of nodes (strings) or None for all

  Returns:
    A NiceSort-ed list of fully-expanded node names.

  Raises:
    errors.OpPrereqError: if the argument has the wrong type or a node
      name cannot be expanded.

  """
  # Bug fix: the docstring promises that None selects all nodes, but
  # the old code rejected None with "Invalid argument type"; handle it
  # explicitly (an empty list keeps its old all-nodes meaning).
  if nodes is None:
    return utils.NiceSort(lu.cfg.GetNodeList())

  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    return utils.NiceSort(lu.cfg.GetNodeList())

  wanted = []
  for name in nodes:
    node = lu.cfg.ExpandNodeName(name)
    if node is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(node)
  return utils.NiceSort(wanted)
205 3312b702 Iustin Pop
206 3312b702 Iustin Pop
207 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    lu: the logical unit on whose behalf we check (its cfg is used)
    instances: List of instances (strings) or None for all

  Returns:
    A NiceSort-ed list of fully-expanded instance names.

  Raises:
    errors.OpPrereqError: if the argument has the wrong type or an
      instance name cannot be expanded.

  """
  # Bug fix: the docstring promises that None selects all instances,
  # but the old code rejected None with "Invalid argument type";
  # handle it explicitly (an empty list keeps its all-instances
  # meaning).
  if instances is None:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    instance = lu.cfg.ExpandInstanceName(name)
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(instance)
  return utils.NiceSort(wanted)
229 dcb93971 Michael Hanselmann
230 dcb93971 Michael Hanselmann
231 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
232 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
233 83120a01 Michael Hanselmann

234 83120a01 Michael Hanselmann
  Args:
235 83120a01 Michael Hanselmann
    static: Static fields
236 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
237 83120a01 Michael Hanselmann

238 83120a01 Michael Hanselmann
  """
239 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
240 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
241 dcb93971 Michael Hanselmann
242 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
243 dcb93971 Michael Hanselmann
244 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
245 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
246 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
247 3ecf6786 Iustin Pop
                                          difference(all_fields)))
248 dcb93971 Michael Hanselmann
249 dcb93971 Michael Hanselmann
250 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
251 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
252 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
253 ecb215b5 Michael Hanselmann

254 ecb215b5 Michael Hanselmann
  Args:
255 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
256 396e1b78 Michael Hanselmann
  """
257 396e1b78 Michael Hanselmann
  env = {
258 0e137c28 Iustin Pop
    "OP_TARGET": name,
259 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
260 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
261 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
262 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
263 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
264 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
265 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
266 396e1b78 Michael Hanselmann
  }
267 396e1b78 Michael Hanselmann
268 396e1b78 Michael Hanselmann
  if nics:
269 396e1b78 Michael Hanselmann
    nic_count = len(nics)
270 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
271 396e1b78 Michael Hanselmann
      if ip is None:
272 396e1b78 Michael Hanselmann
        ip = ""
273 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
274 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
275 396e1b78 Michael Hanselmann
  else:
276 396e1b78 Michael Hanselmann
    nic_count = 0
277 396e1b78 Michael Hanselmann
278 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
279 396e1b78 Michael Hanselmann
280 396e1b78 Michael Hanselmann
  return env
281 396e1b78 Michael Hanselmann
282 396e1b78 Michael Hanselmann
283 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    The environment dict built by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this used to pass instance.os, so INSTANCE_STATUS held
    # the OS name instead of the run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
303 396e1b78 Michael Hanselmann
304 396e1b78 Michael Hanselmann
305 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Scans the cluster known_hosts file, keeps a complete entry matching
  both the node's FQDN and IP with the cluster key, drops stale or
  partial entries for this node, and appends a fresh entry if none was
  found.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open read/write when the file exists so we can append in place;
  # otherwise create it.
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False      # did we see a complete, correct entry?

  save_lines = []      # lines to keep as-is
  add_lines = []       # new entry to add when none matched
  removed = False      # did we discard any stale/partial line?

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    # (comments and lines without the "hosts keytype key" triple fall
    # through to the unconditional append below)
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      fields = parts[0].split(',')  # comma-separated host names/IPs
      key = parts[2]

      # haveall: both the IP and the FQDN appear on this line;
      # havesome: at least one of them does.
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # complete and correct entry - keep it unchanged
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # partial or outdated entry for this node - drop it
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # NOTE(review): rename is only atomic if DATA_DIR and the
    # known_hosts file share a filesystem - presumably they do here.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
382 a8083063 Iustin Pop
383 a8083063 Iustin Pop
384 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
385 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
386 a8083063 Iustin Pop

387 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
388 a8083063 Iustin Pop
  is the error message.
389 a8083063 Iustin Pop

390 a8083063 Iustin Pop
  """
391 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
392 a8083063 Iustin Pop
  if vgsize is None:
393 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
394 a8083063 Iustin Pop
  elif vgsize < 20480:
395 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
396 191a8385 Guido Trotter
            (vgname, vgsize))
397 a8083063 Iustin Pop
  return None
398 a8083063 Iustin Pop
399 a8083063 Iustin Pop
400 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Back up any pre-existing key files, then remove them so ssh-keygen
  # can create a fresh pair.
  for key_path in (priv_key, pub_key):
    if os.path.exists(key_path):
      utils.CreateBackup(key_path)
    utils.RemoveFile(key_path)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  # Authorize our own new public key for the ganeti user.
  pub_file = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, pub_file.read(8192))
  finally:
    pub_file.close()
430 a8083063 Iustin Pop
431 a8083063 Iustin Pop
432 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the simple store instance used to persist the node password

  Raises:
    errors.OpExecError: if certificate generation or the node daemon
      restart fails.

  """
  # Create pseudo random password
  # NOTE: the `sha` module is deprecated in favour of hashlib on newer
  # Python versions.
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Generate a self-signed certificate valid for five years; key and
  # certificate are written into the same file.
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # The file also holds the private key, so owner-read-only.
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # Restart the node daemon so it picks up the new password/cert.
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
461 a8083063 Iustin Pop
462 a8083063 Iustin Pop
463 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Raises errors.OpPrereqError if any bridge used by one of the
  instance's NICs is missing on its primary node.

  """
  # check bridges existance
  bridges = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
473 bf6929a2 Alexander Schreiber
474 bf6929a2 Alexander Schreiber
475 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Validates the cluster parameters (CheckPrereq) and then bootstraps
  the simple store, node daemon credentials, master IP, ssh/hosts
  setup and the cluster configuration (Exec).

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Checks that no cluster exists yet and that host name/IP resolution,
    the secondary IP, the volume group, the MAC prefix, the hypervisor
    type, the master netdev and the init.d script are all valid.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        # Bug fix: the message used to read "VNCpassword" because the
        # implicit string concatenation lacked a space.
        raise errors.OpPrereqError("Please prepare the cluster VNC"
                                   " password file %s" %
                                   constants.VNC_PASSWORD_FILE)

    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # our resolved IP must actually be reachable on this host
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # second space-separated field of the host key line is the key itself
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
602 a8083063 Iustin Pop
603 a8083063 Iustin Pop
604 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    # the only node allowed to remain is the master itself
    if not (len(nodes) == 1 and nodes[0] == master):
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    # save copies of the ganeti user's SSH key pair before the master
    # node tears down its own configuration
    key_files = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in key_files[:2]:
      utils.CreateBackup(key_file)
    rpc.call_node_leave_cluster(master)
640 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    Returns:
      True if any problem was found on this node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # NOTE(review): this message uses one leading space, unlike the
      # two-space "  - ERROR" prefix used everywhere else below
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      # a version mismatch makes further checks meaningless
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      # _HasValidVG returns None on success, an error message otherwise
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      # a non-empty 'nodelist' maps unreachable peers to error messages;
      # NOTE: the loop variable shadows the 'node' parameter from here on
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      # hypervisor failures are reported but do not set 'bad' here
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Args:
      instance: name of the instance to verify
      node_vol_is: dict of node -> actual volumes present on that node
      node_instance: dict of node -> instances actually running there
      feedback_fn: callable used to report errors

    Returns:
      True if any problem was found, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    # expected volumes, keyed by node
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # an instance not marked 'down' must be running on its primary node
    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # the instance must not be running anywhere else
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns 0 if no problem was found, 1 otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all remote data in one RPC round per item type
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      # a string result means the node answered but LVM reported an error
      # (note: 'string_escape' codec is Python 2 only)
      if isinstance(volumeinfo, basestring):
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    # accumulated expected volumes across all instances, keyed by node
    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
889 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a tuple (nodes, nlvm, instances, missing):
      - nodes: list of nodes which could not be queried at all
      - nlvm: dict of node -> LVM error string for nodes with LVM problems
      - instances: list of instance names having offline (degraded) LVs
      - missing: dict of instance name -> list of (node, lv_name) pairs
        for expected LVs that no node reported

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances need their LVs checked
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        # the node answered but LVM itself failed; a string has no
        # .iteritems(), so we must skip the per-LV loop below
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        continue
      elif not isinstance(lvs, dict):
        # RPC failure or garbage data: the node can't be checked at all
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
960 2c95a8d4 Iustin Pop
961 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      # the sstore handle lives on the LU, not on the opcode
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    # hooks run on the master node only
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse to take over an IP that answers pings: it is likely in use
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restore the master role, even if the rename failed
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1039 07bd8a51 Iustin Pop
1040 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance whose disks are polled on its primary node
    proc: object providing LogInfo/LogWarning for progress reporting
    oneshot: if True, poll and report only once instead of waiting
      until the sync completes
    unlock: if True, release the 'cmd' lock while sleeping between
      polls and re-acquire it afterwards

  Returns:
    True if no disk ended up degraded, False otherwise.

  Raises:
    errors.RemoteError: if the primary node returned no mirror data
      for 10 consecutive polls.

  """
  if not instance.disks:
    # nothing to wait for
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    # a successful poll resets the failure counter
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # a degraded device with no progress percentage counts as degraded
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means this device is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    if unlock:
      utils.Unlock('cmd')
    try:
      # NOTE(review): if no device reported an est_time, max_time stays 0
      # and this sleeps 0 seconds, i.e. polls again immediately — confirm
      # whether a minimum sleep is intended
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1102 a8083063 Iustin Pop
1103 a8083063 Iustin Pop
1104 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  cfgw.SetDiskID(dev, node)
  # index of the status flag to inspect in the blockdev_find result
  if ldisk:
    flag_idx = 6
  else:
    flag_idx = 5

  healthy = True
  if on_primary or dev.AssembleOnSecondary():
    dev_status = rpc.call_blockdev_find(node, dev)
    if dev_status:
      healthy = healthy and (not dev_status[flag_idx])
    else:
      logger.ToStderr("Can't get any data from node %s" % node)
      healthy = False
  if dev.children:
    # children are always checked with the default ldisk=False
    for child in dev.children:
      healthy = healthy and _CheckDiskConsistency(cfgw, child, node,
                                                  on_primary)

  return healthy
1131 a8083063 Iustin Pop
1132 a8083063 Iustin Pop
1133 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Returns the per-node OS data gathered over RPC.

    Raises:
      errors.OpExecError: if the RPC layer returned its failure
        sentinel instead of data.

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    # the RPC failure sentinel is the False singleton; test identity,
    # not equality, so falsy-but-valid data is not misclassified
    if node_data is False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data
1156 a8083063 Iustin Pop
1157 a8083063 Iustin Pop
1158 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed does not take part in its own removal hooks
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # use the call form of raise, consistent with the rest of the module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    # stop the node daemon on the removed node
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
1231 c8a0948f Michael Hanselmann
1232 a8083063 Iustin Pop
1233 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["dtotal", "dfree", "mtotal", "mnode",
                                     "mfree", "bootid"])
    static_fields = ["name", "pinst_cnt", "sinst_cnt", "pinst_list",
                     "sinst_list", "pip", "sip"]
    _CheckOutputFields(static=static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    names = self.wanted
    node_objects = [self.cfg.GetNodeInfo(name) for name in names]

    # gather live (memory/volume-group) data only if a dynamic field
    # was actually requested, to avoid needless RPC round-trips
    live_data = {}
    if self.dynamic_fields.intersection(self.op.output_fields):
      node_data = rpc.call_node_info(names, self.cfg.GetVGName())
      for name in names:
        nodeinfo = node_data.get(name, None)
        if not nodeinfo:
          live_data[name] = {}
          continue
        live_data[name] = {
          "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
          "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
          "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
          "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
          "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
          "bootid": nodeinfo['bootid'],
          }
    else:
      for name in names:
        live_data[name] = {}

    # map node name -> set of instance names it hosts (as primary and
    # as secondary); filled only when an instance field was requested
    node_to_primary = {}
    node_to_secondary = {}
    for name in names:
      node_to_primary[name] = set()
      node_to_secondary[name] = set()

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if frozenset(self.op.output_fields) & inst_fields:
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # assemble one output row per node, one column per requested field
    output = []
    for node in node_objects:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1329 a8083063 Iustin Pop
1330 a8083063 Iustin Pop
1331 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of volumes and their attributes.

    """
    volumes = rpc.call_node_volumes(self.nodes)

    # pre-compute, for every instance, the LVs it owns on each node,
    # so that the "instance" column lookup below is a dict access
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in self.nodes:
      # skip nodes which returned nothing (unreachable or no volumes)
      if not volumes.get(node):
        continue

      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find which instance (if any) owns this LV on this node
            owner = '-'
            for inst in instances:
              inst_lvs = lv_by_node[inst]
              if node in inst_lvs and vol['name'] in inst_lvs[node]:
                owner = inst.name
                break
            val = owner
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1399 dcb93971 Michael Hanselmann
1400 dcb93971 Michael Hanselmann
1401 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current node list, post-hooks also on the
    # node being added
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster
     - its IP addresses do not clash with any existing node
     - it is reachable over TCP on the node daemon port

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; also normalizes it to the FQDN
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # a missing secondary IP means the node is single-homed: reuse the
    # primary IP as the secondary one
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # reject any IP (primary or secondary) already used by another node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

    # HVM clusters keep a shared VNC password file which must exist
    # before it can be copied to the new node in Exec()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
                                   constants.VNC_PASSWORD_FILE)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    The sequence below is order-dependent: the node daemon must be
    restarted with the cluster password/certificate before any rpc
    call to the node can work, and ssh keys must be in place before
    files can be distributed.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is embedded in a shell command below, so refuse
    # anything outside a safe character set
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # NOTE(review): fixed sleep to let the daemon come up — presumably
    # a restart race; a retry loop would be more robust
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # order matters: call_node_add expects dsa priv/pub, rsa priv/pub,
    # then the ganeti user's priv/pub key, in this exact order
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _AddHostToEtcHosts(new_node.name)

    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    # for dual-homed nodes, verify the node really owns the secondary
    # IP it claims, by asking it to ping itself on that address
    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # the node's own idea of its hostname must match what the resolver
    # says, otherwise ssh host key checking will break later
    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s."
                               " Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # copy the simple store files (and the VNC password for HVM) to the
    # new node only; failures are logged but not fatal
    to_copy = ss.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1626 a8083063 Iustin Pop
1627 a8083063 Iustin Pop
1628 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.new_master == self.old_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be."
                                 " %s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    old_master = self.old_master
    new_master = self.new_master

    logger.Info("setting master to %s, old master: %s" %
                (new_master, old_master))

    # demote the old master first; a failure is logged but does not
    # abort the failover
    if not rpc.call_node_stop_master(old_master):
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % old_master)

    # record the new master in the simple store and push the updated
    # file to every node
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, new_master)
    master_file = ss.KeyToFilename(ss.SS_MASTER_NODE)
    if not rpc.call_upload_file(self.cfg.GetNodeList(), master_file):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    # finally activate the master role (and IP) on the new master
    if not rpc.call_node_start_master(new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % new_master)
      feedback_fn("Error in activating the master IP on the new master,"
                  " please fix manually.")
1696 a8083063 Iustin Pop
1697 a8083063 Iustin Pop
1698 a8083063 Iustin Pop
1699 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  # this query must also work when run against a non-master node
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequsites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    Returns a dict with the cluster name, the various version numbers,
    the master node name and the local architecture.

    """
    result = {}
    result["name"] = self.sstore.GetClusterName()
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    result["master"] = self.sstore.GetMasterNode()
    result["architecture"] = (platform.architecture()[0], platform.machine())
    return result
1728 a8083063 Iustin Pop
1729 a8083063 Iustin Pop
1730 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Args:
      opts - class with options as members
      args - list containing a single element, the file name
    Opts used:
      nodes - list containing the name of target nodes; if empty, all nodes

    """
    fname = self.op.filename
    my_name = utils.HostInfo().name

    for target in self.nodes:
      # never copy onto ourselves
      if target == my_name:
        continue
      if not ssh.CopyFileToNode(target, fname):
        logger.Error("Copy of file %s to node %s failed" % (fname, target))
1767 a8083063 Iustin Pop
1768 a8083063 Iustin Pop
1769 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites to verify for a config dump.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the serialized cluster configuration.

    """
    return self.cfg.DumpConfig()
1786 a8083063 Iustin Pop
1787 a8083063 Iustin Pop
1788 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    Only needs to expand and validate the requested node list.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run the command on each node and collect the results.

    Returns a list of (node, output, exit_code) tuples, one per node.

    """
    results = []
    for node_name in self.nodes:
      ssh_result = ssh.SSHCall(node_name, "root", self.op.command)
      results.append((node_name, ssh_result.output, ssh_result.exit_code))

    return results
1812 a8083063 Iustin Pop
1813 a8083063 Iustin Pop
1814 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    The named instance must exist in the cluster configuration.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Assemble the instance's block devices.

    Returns the device mapping information from the assembly.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
1843 a8083063 Iustin Pop
1844 a8083063 Iustin Pop
1845 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    A (disks_ok, device_info) tuple: disks_ok is False if assembling any
    relevant device failed, and device_info is a list of
    (primary_node, iv_name, primary_assemble_result) triples, one per
    instance disk.

  """
  disks_ok = True
  device_info = []

  for inst_disk in instance.disks:
    primary_result = None
    for node_name, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node_name)
      on_primary = node_name == instance.primary_node
      result = rpc.call_blockdev_assemble(node_name, node_disk,
                                          instance.name, on_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=%s)" %
                     (inst_disk.iv_name, node_name, on_primary))
        # secondary failures only count when not explicitly ignored
        if on_primary or not ignore_secondaries:
          disks_ok = False
      if on_primary:
        primary_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        primary_result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1887 a8083063 Iustin Pop
1888 a8083063 Iustin Pop
1889 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Assemble the disks of an instance, aborting on failure.

  If assembly fails, any already-assembled devices are shut down again
  and an OpExecError is raised; a hint about '--force' is logged when
  force is an explicit False.

  """
  assembled, dummy = _AssembleInstanceDisks(instance, cfg,
                                            ignore_secondaries=force)
  if assembled:
    return

  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1901 fe7b0351 Michael Hanselmann
1902 fe7b0351 Michael Hanselmann
1903 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    Refuses to shut down the block devices while the instance is still
    running on its primary node.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list result means the RPC to the node failed; use isinstance
    # instead of an exact type comparison (idiomatic, accepts subclasses)
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1938 a8083063 Iustin Pop
1939 a8083063 Iustin Pop
1940 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors when shutting down devices on the
  primary node are ignored (they do not affect the return value);
  failures on any other node always produce a False result.
  (Note: the previous docstring stated the inverted condition; the code
  below clears the result only when "not ignore_primary or the node is
  not the primary".)

  Returns:
    True if all relevant device shutdowns succeeded, False otherwise.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # a failure counts against the result unless it happened on the
        # primary node and ignore_primary was requested
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1959 a8083063 Iustin Pop
1960 a8083063 Iustin Pop
1961 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"FORCE": self.op.force}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    Verifies free memory on the primary node, assembles the disks and
    then starts the instance via RPC; on a failed start the disks are
    shut down again.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")
    pnode = instance.primary_node

    nodeinfo = rpc.call_node_info([pnode], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (pnode))

    free_mem = nodeinfo[pnode]['memory_free']
    needed_mem = instance.memory
    if needed_mem > free_mem:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, pnode, needed_mem,
                                free_mem))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(pnode, instance, extra_args):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
2032 a8083063 Iustin Pop
2033 a8083063 Iustin Pop
2034 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {"IGNORE_SECONDARIES": self.op.ignore_secondaries}
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft/hard reboots are delegated to the hypervisor; a full reboot is
    implemented as a complete shutdown/startup cycle including the
    block devices.

    NOTE(review): the reboot_type validation happens here rather than
    in CheckPrereq, so hooks run before an invalid type is rejected.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")
    pnode = instance.primary_node

    hv_reboots = (constants.INSTANCE_REBOOT_SOFT,
                  constants.INSTANCE_REBOOT_HARD)

    if reboot_type not in hv_reboots + (constants.INSTANCE_REBOOT_FULL,):
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in hv_reboots:
      # the hypervisor can perform soft/hard reboots directly
      if not rpc.call_instance_reboot(pnode, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance and its disks, then start again
      if not rpc.call_instance_shutdown(pnode, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(pnode, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2108 bf6929a2 Alexander Schreiber
2109 bf6929a2 Alexander Schreiber
2110 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    A failed shutdown RPC is only logged; the instance is marked down
    and its disks are deactivated regardless.

    """
    instance = self.instance
    pnode = instance.primary_node
    if not rpc.call_instance_shutdown(pnode, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2153 a8083063 Iustin Pop
2154 a8083063 Iustin Pop
2155 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check against the live state on the primary node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # fixed: previously formatted with self.op.pnode, which does not
        # exist on this opcode and would raise AttributeError instead
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally changes the instance's OS first, then runs the OS create
    scripts on the primary node with the disks assembled.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always release the block devices, even on failure
      _ShutdownInstanceDisks(inst, self.cfg)
2232 fe7b0351 Michael Hanselmann
2233 fe7b0351 Michael Hanselmann
2234 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    that the new name resolves, and (unless ignore_ip is set) that the
    new name's IP is not already in use.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    if not getattr(self.op, "ignore_ip", False):
      # a successful fping means the IP already answers, i.e. is in use
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    The configuration is updated first; afterwards the OS rename script
    is run on the primary node. A script failure is only logged, since
    the rename has already taken effect in Ganeti.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # fixed message: previously read "Could run ..." (missing "not")
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2309 decd5f45 Iustin Pop
2310 decd5f45 Iustin Pop
2311 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # Exec() reads self.op.ignore_failures, so it must be a required opcode
  # parameter; otherwise a caller omitting it would trigger an
  # AttributeError at execution time instead of a clean prereq error.
  _OP_REQP = ["instance_name", "ignore_failures"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Only the master node is notified for instance removal.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks and finally drops it from
    the cluster configuration.  With ignore_failures set, shutdown/disk
    removal problems are reported as warnings and the removal proceeds.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields whose values must be fetched live from the nodes, as opposed
    # to the static ones readable from the configuration
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # expand self.op.names into the actual list of instance names to query
    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # only contact the nodes if a dynamic field was actually requested
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # a False result marks the node itself as unreachable
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # no dynamic fields wanted: pretend every instance has empty live data
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # configured (desired) state, not the live one
          val = (instance.status != "down")
        elif field == "oper_state":
          # live state; None when the primary node couldn't be contacted
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          # live memory usage: None for bad nodes, "-" if not running
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          # NOTE(review): assumes the instance has at least one NIC
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field name encodes the disk name in its first three chars
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a network
    mirrored disk template, and that the target (secondary) node has
    enough memory and the required bridges.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # failover only makes sense when the disks exist on both nodes
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    # check memory requirements on the secondary node
    target_node = secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      # fix: interpolate the node name, not the whole rpc result dict
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % target_node)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    # re-check memory at execution time: the situation may have changed
    # since CheckPrereq ran
    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency the source node may be dead: proceed anyway
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      # don't leave the disks dangling if the start failed
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  Every device in the tree is created unconditionally, children first,
  so that each parent can be assembled on top of existing components.

  Returns True on success, False as soon as any creation fails.

  """
  # depth-first: all children must exist before the parent device
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  # remember the physical id the node assigned, if we had none yet
  if device.physical_id is None:
    device.physical_id = new_id
  return True
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Recursively create a block device tree on a secondary node.

  A device is physically created only when its type requires presence on
  secondaries (device.CreateOnSecondary()) or when 'force' is already
  set; otherwise the recursion just walks the children, propagating the
  same 'force' value.

  Returns True on success, False on the first failure.

  """
  # once this device (or any ancestor) requires creation, everything
  # below it must be created as well
  force = device.CreateOnSecondary() or force

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create for this device itself
    return True

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  # record the node-assigned physical id if we had none yet
  if device.physical_id is None:
    device.physical_id = new_id
  return True
def _GenerateUniqueNames(cfg, exts):
  """Generate unique logical volume names.

  For each extension in 'exts', a fresh unique id is requested from the
  configuration and the extension appended to it.

  Returns the list of generated names, in the same order as 'exts'.

  """
  return ["%s%s" % (cfg.GenerateUniqueID(), ext) for ext in exts]
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Build a drbd (LD_DRBD7) disk object with its data/meta LV children.

  'names' supplies the two logical volume names (data, meta); a network
  port is allocated from the configuration for the drbd pair.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # data LV matches the requested size; the meta LV is a fixed 128
  children = [
    objects.Disk(dev_type=constants.LD_LV, size=size,
                 logical_id=(vgname, names[0])),
    objects.Disk(dev_type=constants.LD_LV, size=128,
                 logical_id=(vgname, names[1])),
    ]
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, port),
                      children=children)
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Build a drbd8 (LD_DRBD8) disk object with its data/meta LV children.

  'names' supplies the two logical volume names (data, meta); a network
  port is allocated from the configuration for the drbd pair, and the
  resulting device is exported under 'iv_name'.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # data LV matches the requested size; the meta LV is a fixed 128
  children = [
    objects.Disk(dev_type=constants.LD_LV, size=size,
                 logical_id=(vgname, names[0])),
    objects.Disk(dev_type=constants.LD_LV, size=128,
                 logical_id=(vgname, names[1])),
    ]
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=children, iv_name=iv_name)
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of top-level disk objects (sda/sdb) appropriate for
  'template_name'; raises ProgrammerError for an unknown template or a
  secondary-node count that doesn't match the template.

  NOTE(review): the first three templates are matched by string literal
  while the last two use constants.DT_* values — consider unifying if
  matching constants exist for "diskless"/"plain"/"local_raid1".

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == "diskless":
    # no disks at all
    disks = []
  elif template_name == "plain":
    # two plain LVs on the primary node only
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    # md raid1 over two LV mirrors, all on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")


    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    # md raid1 over a drbd7 device mirrored to one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # plain drbd8 devices (no md layer) mirrored to one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  The text tags the disk with the name of the instance that owns it.

  """
  return "originstname+" + instance.name
def _CreateDisks(cfg, instance):
  """Create all disks for an instance, on every relevant node.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    #HARDCODE
    # secondaries first, so the primary can assemble on top of them
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(cfg, snode, instance,
                                           disk, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                       instance, disk, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal proces

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for top_disk in instance.disks:
    # walk the whole device tree across all nodes it lives on
    for node, disk in top_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if rpc.call_blockdev_remove(node, disk):
        continue
      # best-effort: log and keep removing the remaining devices
      logger.Error("could not remove block device %s on node %s,"
                   " continuing anyway" %
                   (top_disk.iv_name, node))
      all_removed = False

  return all_removed
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Implements the instance-add logical unit: CheckPrereq validates the
  opcode (create vs. import mode, primary/secondary nodes, free disk
  space, guest OS, instance name/IP, MAC, bridge) and Exec creates the
  disks, registers the instance in the configuration, installs or
  imports the OS and optionally starts the instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present before CheckPrereq runs
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import mode exposes extra information about the source export;
      # self.src_image was computed by CheckPrereq
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    # hooks run on the master plus all nodes of the new instance
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the whole opcode and stores the derived values
    (self.pnode, self.secondaries, self.inst_ip, self.instance_status,
    and for imports self.src_image) for use by BuildHooksEnv and Exec.

    Raises:
      errors.OpPrereqError: for any user-correctable validation failure

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # import mode: validate the source node/path and the export found
      # there before anything else
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      # export_info behaves like a ConfigParser describing the export
      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # only single-data-disk exports are supported
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      # network-mirrored templates need exactly one secondary node,
      # distinct from the primary
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    # every involved node must report enough free space in the VG
    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % nodeinfo)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    # normalize the instance name to its resolved form
    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # ip can be None/"none" (no IP), "auto" (resolve from name), or a
    # literal address which must be valid
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # NOTE(review): TcpPing returning true presumably means something
      # already answers on that IP, i.e. an address conflict
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the block devices, adds the instance to the configuration,
    waits for disk sync (or at least non-degraded state), runs the OS
    create/import scripts and optionally starts the instance.

    Raises:
      errors.OpExecError: if any step fails; disk creation failures are
        rolled back via _RemoveDisks

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    # "auto" means let the cluster pick a fresh MAC address
    if self.op.mac == "auto":
      mac_address=self.cfg.GenerateMAC()
    else:
      mac_address=self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisor types need a dedicated network (console) port
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      # block until the disks are fully synchronized
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the devices and the config entry
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3147 a8083063 Iustin Pop
3148 a8083063 Iustin Pop
3149 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it is currently running
    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)
    # build the ssh command line the caller must run on the master
    argv = ["ssh", "-q", "-t"]
    argv.extend(ssh.KNOWN_HOSTS_OPTS)
    argv.extend(ssh.BATCH_MODE_OPTS)
    argv.extend([pnode, console_cmd])
    return "ssh", argv
3197 a8083063 Iustin Pop
3198 a8083063 Iustin Pop
3199 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Creates a fresh DRBD branch (on the primary and a new remote node)
  and attaches it as an additional child of an existing remote_raid1
  md device. Fixes vs. previous revision: docstring typo ("Adda") and
  the "compoment" typo in the rollback error log message.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present before CheckPrereq runs
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master, the primary, the new secondary and all
    # existing secondaries
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    node is known and is not the primary, that the instance uses the
    remote_raid1 template, and that the named disk exists and does not
    already have two children.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the disk with the requested iv_name (for/else: not found)
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave devices."
                                 " This would create a 3-disk raid1 which we"
                                 " don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component.

    Creates the new DRBD branch on the secondary then the primary,
    attaches it to the md device, and waits for sync. Each step that
    fails rolls back the devices created so far.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    # generate cluster-unique LV names for the data and meta volumes
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # attach failed: tear down the new branch on both nodes,
      # logging (but not aborting on) rollback failures
      logger.Error("Can't add mirror component to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    disk.children.append(new_drbd)

    # persist the modified disk tree
    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3311 a8083063 Iustin Pop
3312 a8083063 Iustin Pop
3313 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  Detaches one drbd7 child (identified by disk_name + disk_id) from an
  MD mirror device and then removes the detached device from both of
  its nodes.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,  # computed in CheckPrereq
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # run hooks on the master plus all nodes of the instance
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the md disk by its iv_name; the for/else fires only when
    # no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # locate the drbd7 child whose port (logical_id[2]) matches disk_id
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    # refuse to degrade the mirror to zero components
    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0] and [1] are the two nodes of the drbd pair; record
    # the one which is not the primary as the old secondary (used in
    # the hooks environment)
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    # first detach the child from the md device on the primary node
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # then remove the now-detached device on both of its nodes;
    # failures here are logged but do not abort (best-effort cleanup)
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    # persist the modified disk tree back into the configuration
    self.cfg.AddInstance(instance)
3398 a8083063 Iustin Pop
3399 a8083063 Iustin Pop
3400 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3401 a8083063 Iustin Pop
  """Replace the disks of an instance.
3402 a8083063 Iustin Pop

3403 a8083063 Iustin Pop
  """
3404 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3405 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3406 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3407 a8083063 Iustin Pop
3408 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(instance))
    # hook nodes: master and primary, plus the new secondary if one
    # was requested
    nl = [self.sstore.GetMasterNode(), instance.primary_node]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl
3427 a8083063 Iustin Pop
3428 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Also validates the replacement mode against the disk template and
    computes self.sec_node / self.tgt_node / self.oth_node /
    self.new_node / self.remote_node_info for Exec.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # normalize the opcode name to the expanded instance name
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # remote_node is optional on the opcode, hence getattr
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode for drbd7
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # tgt_node is where the new LVs are created, oth_node is the peer
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # all requested disks must exist on the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # store back the (possibly cleared) remote node
    self.op.remote_node = remote_node
3508 a8083063 Iustin Pop
3509 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance.

    Replacement path for the remote_raid1 (md over drbd7) template:
    for every disk, a new drbd branch is built on (primary,
    remote_node), added to the md mirror, synced, and the old child is
    then detached and removed.

    """
    instance = self.instance
    iv_names = {}
    # start of work
    # no explicit remote node means "rebuild towards the current
    # secondary"
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      # remember (md device, old child, new child) for the later passes
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on secondary"
                                 " node %s. Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!"
                                 " Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        # rollback: remove the new drbd device on both nodes before
        # aborting
        logger.Error("Can't add mirror compoment to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything is in sync: detach and remove the old children
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3603 a8083063 Iustin Pop
3604 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node: where the LVs are replaced; oth_node: its drbd peer
    # (both were computed in CheckPrereq)
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # every disk selected for replacement must be visible on both nodes
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      # a drbd8 disk is backed by one data LV and one 128MB metadata LV
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption than logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the config objects to reflect the renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        # rollback: best-effort removal of the freshly created LVs
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of the blockdev_find result is the degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
3773 a9e0c397 Iustin Pop
3774 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if dev.iv_name not in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if dev.iv_name not in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      # ldisk=True means we check the local disk state only, as the
      # peer (the node being replaced) may be down
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      iv_names[dev.iv_name] = (dev, dev.children)

    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev in instance.disks:
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      if not rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the physical (unique in bdev terms) id to None, meaning
      # detach from network
      dev.physical_id = (None,) * len(dev.physical_id)
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev in instance.disks:
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                hint="please do a gnt-instance info to see the"
                " status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        if not rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
3938 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    disk_template = self.instance.disk_template
    if disk_template == constants.DT_REMOTE_RAID1:
      handler = self._ExecRR1
    elif disk_template == constants.DT_DRBD8:
      # for drbd8, a missing remote node means we only replace the
      # disks in place; a given remote node means a secondary change
      if self.op.remote_node is None:
        handler = self._ExecD8DiskOnly
      else:
        handler = self._ExecD8Secondary
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return handler(feedback_fn)
3955 a9e0c397 Iustin Pop
3956 a8083063 Iustin Pop
3957 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      for name in self.op.instances:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        # append inside the loop, so that *all* requested instances are
        # collected (previously only the last one was remembered)
        self.wanted_instances.append(instance)
    else:
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device; pstatus/sstatus hold the
    result of blockdev_find on the primary and secondary node
    respectively, and children are computed recursively.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      # "state" in the hypervisor answer means the instance is running
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "network_port": instance.network_port,
        "vcpus": instance.vcpus,
        }

      result[instance.name] = idict

    return result
4057 a8083063 Iustin Pop
4058 a8083063 Iustin Pop
4059 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # only the parameters actually being changed are passed as overrides
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge:
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      # NOTE(review): this tests truthiness of self.bridge rather than
      # self.do_bridge; differs only if an empty bridge name is passed
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      args['nics'] = [(ip, bridge)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # all five parameters are optional on the opcode; missing ones stay None
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    # at least one of the five parameters must have been submitted
    if [self.mem, self.vcpus, self.ip, self.bridge, self.mac].count(None) == 5:
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" (any case) clears the instance's IP
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" %
                                 self.op.instance_name)
    # canonicalize the (possibly abbreviated) name passed by the user
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # result is the list of (parameter, new value) pairs actually applied
    result = []
    instance = self.instance
    # NOTE(review): mem/vcpus/bridge/mac use truthiness here, so a
    # falsy-but-not-None value (e.g. 0) would be silently skipped
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))

    # persist the modified instance object in the cluster config
    self.cfg.AddInstance(instance)

    return result
4168 a8083063 Iustin Pop
4169 a8083063 Iustin Pop
4170 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # a missing/empty node list means "all nodes"
    wanted = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, wanted)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    exports = rpc.call_export_list(self.nodes)
    return exports
4192 a8083063 Iustin Pop
4193 a8083063 Iustin Pop
4194 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # canonicalize the (possibly abbreviated) target node name
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The flow is: optionally shut the instance down, snapshot its first
    disk, restart it, copy the snapshot to the target node, then prune
    older exports of the same instance from the other nodes.  Most
    per-step failures are only logged, not raised.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      # only the first disk ("sda") is snapshotted; other disks of the
      # instance are skipped and thus not part of the export
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if the snapshot failed, but only if
      # we were the ones who shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    # copy each snapshot to the target node, then drop the snapshot LV
    # from the source node regardless of the copy's outcome
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4306 5c947f38 Iustin Pop
4307 5c947f38 Iustin Pop
4308 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.
  It resolves self.op.kind/self.op.name into self.target.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      # expand a possibly-abbreviated node name before looking it up
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      # same expansion dance for instance names
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4337 5c947f38 Iustin Pop
4338 5c947f38 Iustin Pop
4339 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Fetch and return the tag list of the resolved target.

    """
    # self.target was resolved by TagsLU.CheckPrereq
    tags = self.target.GetTags()
    return tags
class LUSearchTags(NoHooksLU):
4353 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4354 73415719 Iustin Pop

4355 73415719 Iustin Pop
  """
4356 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4357 73415719 Iustin Pop
4358 73415719 Iustin Pop
  def CheckPrereq(self):
4359 73415719 Iustin Pop
    """Check prerequisites.
4360 73415719 Iustin Pop

4361 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4362 73415719 Iustin Pop

4363 73415719 Iustin Pop
    """
4364 73415719 Iustin Pop
    try:
4365 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4366 73415719 Iustin Pop
    except re.error, err:
4367 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4368 73415719 Iustin Pop
                                 (self.op.pattern, err))
4369 73415719 Iustin Pop
4370 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4371 73415719 Iustin Pop
    """Returns the tag list.
4372 73415719 Iustin Pop

4373 73415719 Iustin Pop
    """
4374 73415719 Iustin Pop
    cfg = self.cfg
4375 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4376 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4377 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4378 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4379 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4380 73415719 Iustin Pop
    results = []
4381 73415719 Iustin Pop
    for path, target in tgts:
4382 73415719 Iustin Pop
      for tag in target.GetTags():
4383 73415719 Iustin Pop
        if self.re.search(tag):
4384 73415719 Iustin Pop
          results.append((path, tag))
4385 73415719 Iustin Pop
    return results
4386 73415719 Iustin Pop
4387 73415719 Iustin Pop
4388 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4389 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4390 5c947f38 Iustin Pop

4391 5c947f38 Iustin Pop
  """
4392 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4393 5c947f38 Iustin Pop
4394 5c947f38 Iustin Pop
  def CheckPrereq(self):
4395 5c947f38 Iustin Pop
    """Check prerequisites.
4396 5c947f38 Iustin Pop

4397 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4398 5c947f38 Iustin Pop

4399 5c947f38 Iustin Pop
    """
4400 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4401 f27302fa Iustin Pop
    for tag in self.op.tags:
4402 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4403 5c947f38 Iustin Pop
4404 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4405 5c947f38 Iustin Pop
    """Sets the tag.
4406 5c947f38 Iustin Pop

4407 5c947f38 Iustin Pop
    """
4408 5c947f38 Iustin Pop
    try:
4409 f27302fa Iustin Pop
      for tag in self.op.tags:
4410 f27302fa Iustin Pop
        self.target.AddTag(tag)
4411 5c947f38 Iustin Pop
    except errors.TagError, err:
4412 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4413 5c947f38 Iustin Pop
    try:
4414 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4415 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4416 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4417 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4418 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4419 5c947f38 Iustin Pop
4420 5c947f38 Iustin Pop
4421 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This validates the tags and verifies each one is currently set on
    the target object.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    wanted = frozenset(self.op.tags)
    existing = self.target.GetTags()
    missing = wanted - existing
    if missing:
      # report the absent tags in a stable, sorted order
      missing_names = ["'%s'" % tag for tag in missing]
      missing_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing_names)))

  def Exec(self, feedback_fn):
    """Remove the tags from the object and save the configuration.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      # concurrent config modification: ask the caller to retry
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")