Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 41a57aab

History | View | Annotate | Download (153.5 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 7c0d6283 Michael Hanselmann
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Base class for all Logical Units.

  A subclass is expected to:
    - implement CheckPrereq, which must also fill in every field of
      the opcode instance (even if only with None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally override its run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore

    # every name listed in _OP_REQP must be present on the opcode
    for required in self._OP_REQP:
      if getattr(op, required, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   required)

    if not self.REQ_CLUSTER:
      return

    if not cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")

    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if utils.HostInfo().name != master:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def CheckPrereq(self):
    """Check the prerequisites for this LU.

    Verifies that the LU can actually be executed. It may talk to
    other nodes, but must be idempotent — no cluster or system changes
    are allowed.

    If a prerequisite is not met, errors.OpPrereqError must be raised;
    the return value of this method is ignored.

    On success every opcode parameter must have been brought into its
    canonical form; e.g. a short node name must be fully expanded so
    that hooks, logging, etc. work correctly.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    Performs the actual work. Failures that are expected, or dealt
    with somewhat in code, should be signalled by raising
    errors.OpExecError.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Returns a three-element tuple: a dict with the environment used to
    run this LU's specific hook, a list of node names on which the
    hook should run before execution, and a list of node names on
    which it should run afterwards.

    The dict keys must not carry the 'GANETI_' prefix, as that is
    handled by the hooks runner, which also contributes additional
    keys of its own. An LU that defines no environment must return an
    empty dict (never None).

    The master node must not appear in either node list; the hooks
    runner adds it itself whenever this LU requires a cluster to run
    on (without one there is no node list). Empty node lists must be
    returned as [] (never None).

    If the HPATH of an LU class is None, this method is never invoked.

    """
    raise NotImplementedError
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
148 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """A LogicalUnit that runs no hooks at all.

  Intended as a shared parent for LogicalUnits without hooks, so the
  empty-environment boilerplate is not duplicated.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Return an empty hooks environment.

    Since no hooks are run, the environment dict and both node lists
    are empty.

    """
    empty_env = {}
    return empty_env, [], []
165 a8083063 Iustin Pop
166 a8083063 Iustin Pop
167 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
  """Wrapper around utils.SetEtcHostsEntry.

  Resolves the host and registers its IP, full name and short name in
  the system hosts file.

  """
  host = utils.HostInfo(name=hostname)
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, host.ip, host.name,
                         [host.ShortName()])
173 9440aeab Michael Hanselmann
174 9440aeab Michael Hanselmann
175 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  Removes both the full and the short name of the host from the
  system hosts file.

  """
  host = utils.HostInfo(name=hostname)
  for entry in (host.name, host.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, entry)
182 c8a0948f Michael Hanselmann
183 c8a0948f Michael Hanselmann
184 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns a nice-sorted list of checked and expanded node names.

  Args:
    nodes: list of node names (strings); an empty list selects every
      node in the configuration. Anything that is not a list (note:
      including None) raises errors.OpPrereqError.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    # empty list: operate on all nodes
    return utils.NiceSort(lu.cfg.GetNodeList())

  wanted = []
  for name in nodes:
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
206 3312b702 Iustin Pop
207 3312b702 Iustin Pop
208 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns a nice-sorted list of checked and expanded instance names.

  Args:
    instances: list of instance names (strings); an empty list selects
      every instance in the configuration. Anything that is not a list
      (note: including None) raises errors.OpPrereqError.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # empty list: operate on all instances
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
230 dcb93971 Michael Hanselmann
231 dcb93971 Michael Hanselmann
232 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected output fields are valid.

  Args:
    static: iterable of static field names
    dynamic: iterable of dynamic field names
    selected: iterable of requested field names

  Raises errors.OpPrereqError if any selected field is neither static
  nor dynamic; returns nothing on success.

  """
  legal_fields = frozenset(static) | frozenset(dynamic)
  unknown_fields = frozenset(selected).difference(legal_fields)

  if unknown_fields:
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(unknown_fields))
249 dcb93971 Michael Hanselmann
250 dcb93971 Michael Hanselmann
251 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Build the instance-related hook environment from plain values.

  Args:
    secondary_nodes: List of secondary nodes as strings

  Returns a dict mapping environment variable names (without the
  GANETI_ prefix) to their values, with one IP/BRIDGE/HWADDR triple
  per NIC plus the total NIC count.

  """
  env = {}
  env["OP_TARGET"] = name
  env["INSTANCE_NAME"] = name
  env["INSTANCE_PRIMARY"] = primary_node
  env["INSTANCE_SECONDARIES"] = " ".join(secondary_nodes)
  env["INSTANCE_OS_TYPE"] = os_type
  env["INSTANCE_STATUS"] = status
  env["INSTANCE_MEMORY"] = memory
  env["INSTANCE_VCPUS"] = vcpus

  nic_count = 0
  if nics:
    nic_count = len(nics)
    idx = 0
    for (ip, bridge, mac) in nics:
      # a NIC without an IP is exported as the empty string
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
      idx += 1

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env
283 396e1b78 Michael Hanselmann
284 396e1b78 Michael Hanselmann
285 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the environment dict produced by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # BUGFIX: this used to pass instance.os here as well, which made
    # INSTANCE_STATUS a duplicate of INSTANCE_OS_TYPE instead of the
    # instance's run state.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
305 396e1b78 Michael Hanselmann
306 396e1b78 Michael Hanselmann
307 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Scans the cluster-wide ssh known_hosts file: entries that match both
  the node's name and IP with the given key are kept, entries that
  mention either name/IP but do not fully match are discarded, and a
  fresh 'ssh-rsa' entry is added when no exact match existed.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open read/write when the file already exists, otherwise create it.
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  # set to True once an exactly-matching entry has been seen
  inthere = False

  save_lines = []  # lines kept from the original file
  add_lines = []   # new entry to append when no exact match was found
  removed = False  # whether any stale line was discarded

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    # known_hosts format: "host1,host2 keytype key" per line
    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      fields = parts[0].split(',')
      key = parts[2]

      # haveall: both ip and fullnode appear in the host list;
      # havesome: at least one of them does
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # full match with the right key: keep the line unchanged
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # partial or outdated entry for this host: drop it
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # NOTE(review): assumes DATA_DIR and the known_hosts file live on
    # the same filesystem so os.rename below is an atomic replace —
    # confirm against constants.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
384 a8083063 Iustin Pop
385 a8083063 Iustin Pop
386 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
  """Validate the presence and size of a volume group.

  Looks up vgname in the vglist mapping (volume group name -> size).
  A non-None return value means there's an error, and the return value
  is the error message; None means the volume group is fine.

  """
  size = vglist.get(vgname, None)
  if size is None:
    return "volume group '%s' missing" % vgname
  if size < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
            (vgname, size))
  return None
400 a8083063 Iustin Pop
401 a8083063 Iustin Pop
402 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  Generates a new dsa keypair for the ganeti run-as user (backing up
  and removing any pre-existing keys) and registers the new public key
  in that user's authorized_keys file.

  Args:
    node: the name of this host as a fqdn

  """
  priv, pub, auth = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # preserve any existing key material before wiping it
  for path in (priv, pub):
    if os.path.exists(path):
      utils.CreateBackup(path)
    utils.RemoveFile(path)

  keygen = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv,
                         "-q", "-N", ""])
  if keygen.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             keygen.output)

  pub_file = open(pub, 'r')
  try:
    utils.AddAuthorizedKey(auth, pub_file.read(8192))
  finally:
    pub_file.close()
432 a8083063 Iustin Pop
433 a8083063 Iustin Pop
434 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the ssconf SimpleStore used to persist the node password

  Raises errors.OpExecError when certificate generation or the node
  daemon restart fails.

  """
  # Create pseudo random password
  # NOTE(review): the 'sha' module is deprecated in favour of hashlib
  # (and removed in Python 3); sha1 is used only to derive a random
  # hex token here, not for security-sensitive hashing of data.
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Generate a self-signed certificate valid for about 5 years; the
  # same file receives both the private key and the certificate.
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # owner read-only: the file also contains the private key
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # restart the node daemon so it picks up the new password/certificate
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
463 a8083063 Iustin Pop
464 a8083063 Iustin Pop
465 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Verify that all bridges needed by an instance exist.

  Queries the instance's primary node via rpc and raises
  errors.OpPrereqError if any of the NIC bridges is missing there.

  """
  bridges = []
  for nic in instance.nics:
    bridges.append(nic.bridge)
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
475 bf6929a2 Alexander Schreiber
476 bf6929a2 Alexander Schreiber
477 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Runs on a machine that is not yet part of any cluster (REQ_CLUSTER
  is False) and turns it into the master node of a new cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Also validates the local host's DNS setup, the secondary IP, the
    volume group, the MAC prefix, the hypervisor type, the master
    network device and the node init script.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        # BUGFIX: the implicit string concatenation was missing a
        # space, producing "...cluster VNCpassword file".
        raise errors.OpPrereqError("Please prepare the cluster VNC"
                                   " password file %s" %
                                   constants.VNC_PASSWORD_FILE)

    self.hostname = hostname = utils.HostInfo()

    # a loopback-only resolution would make the master unreachable
    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or %s." %
                                 (hostname.ip, constants.ETC_HOSTS))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # the host must be reachable from itself on its primary IP
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    # a secondary IP that differs from the primary must also answer
    # locally, otherwise it does not belong to this host
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    # a MAC prefix is the first three octets, e.g. "aa:00:00"
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    # the master network device must exist on this host
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Persists the cluster parameters in the simple store, generates the
    node password and SSL certificate, starts the master IP, sets up
    ssh and /etc/hosts, and writes the initial cluster configuration.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # host key lines are "<type> <base64-key> [comment]"; keep the key
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
604 a8083063 Iustin Pop
605 a8083063 Iustin Pop
606 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit that tears down an (empty) cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the cluster is empty: no instances defined and the
    master being the only remaining node.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_names = self.cfg.GetNodeList()
    master_name = self.sstore.GetMasterNode()
    if len(node_names) != 1 or node_names[0] != master_name:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    inst_names = self.cfg.GetInstanceList()
    if inst_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(inst_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Stops the master role, backs up the cluster ssh keys and finally
    asks the master node to leave the cluster.

    """
    master_name = self.sstore.GetMasterNode()
    if not rpc.call_node_stop_master(master_name):
      raise errors.OpExecError("Could not disable the master role")
    # keep a copy of the ssh identity before it is removed
    priv, pub, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv, pub):
      utils.CreateBackup(key_file)
    rpc.call_node_leave_cluster(master_name)
642 a8083063 Iustin Pop
643 a8083063 Iustin Pop
644 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned from the node
      node_result: the results of the 'node verify' RPC for this node
      remote_version: the node's reported protocol version
      feedback_fn: function used to report each problem found

    Returns:
      True if any problem was found on the node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a dedicated loop variable; the previous code shadowed the
        # 'node' argument here, hiding the name of the node under test
        for remote_node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (remote_node, node_result['nodelist'][remote_node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Args:
      instance: name of the instance to check
      node_vol_is: per-node dictionary of the volumes actually found
      node_instance: per-node dictionary of the running instances
      feedback_fn: function used to report each problem found

    Returns:
      True if any problem was found, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.status != 'down':
      # guard against a primary node whose data could not be gathered at
      # all (it would then be missing from node_instance and the plain
      # lookup would raise KeyError)
      if (node_current not in node_instance or
          not instance in node_instance[node_current]):
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns 0 if no problems were found, 1 otherwise (usable directly
    as an exit code).

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means the LVM scan itself failed on the node
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if not isinstance(nodeinstance, list):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
891 a8083063 Iustin Pop
892 a8083063 Iustin Pop
893 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a tuple of (unreachable nodes, per-node LVM error strings,
    instances with offline volumes, per-instance missing volumes); the
    four members alias the res_* locals below, so they are filled in
    incrementally.

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # map (node, volume) to the owning instance, for all volumes that
    # should exist for running, network-mirrored instances
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        # an error string carries no volume data to iterate over; the
        # previous code fell through and crashed on lvs.iteritems()
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
963 2c95a8d4 Iustin Pop
964 2c95a8d4 Iustin Pop
965 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run on the master node only, both before and after the
    rename.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, checks that either the name or the IP
    actually changes, and that the new IP is not already live on the
    network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a *successful* fping means something already answers on the new
      # IP, which would conflict with the renamed cluster
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the resolved (canonical) name for Exec
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    The master IP is stopped first, then the ssconf keys are updated
    and pushed to all other nodes; the master role is restarted even if
    the distribution step fails.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      # the master already holds the new files, no need to copy to itself
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          # failed copies are only logged, not fatal: the rename itself
          # has already been committed to the local sstore
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to bring the master IP back up
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1042 07bd8a51 Iustin Pop
1043 07bd8a51 Iustin Pop
1044 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1045 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1046 a8083063 Iustin Pop

1047 a8083063 Iustin Pop
  """
1048 a8083063 Iustin Pop
  if not instance.disks:
1049 a8083063 Iustin Pop
    return True
1050 a8083063 Iustin Pop
1051 a8083063 Iustin Pop
  if not oneshot:
1052 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1053 a8083063 Iustin Pop
1054 a8083063 Iustin Pop
  node = instance.primary_node
1055 a8083063 Iustin Pop
1056 a8083063 Iustin Pop
  for dev in instance.disks:
1057 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1058 a8083063 Iustin Pop
1059 a8083063 Iustin Pop
  retries = 0
1060 a8083063 Iustin Pop
  while True:
1061 a8083063 Iustin Pop
    max_time = 0
1062 a8083063 Iustin Pop
    done = True
1063 a8083063 Iustin Pop
    cumul_degraded = False
1064 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1065 a8083063 Iustin Pop
    if not rstats:
1066 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1067 a8083063 Iustin Pop
      retries += 1
1068 a8083063 Iustin Pop
      if retries >= 10:
1069 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1070 3ecf6786 Iustin Pop
                                 " aborting." % node)
1071 a8083063 Iustin Pop
      time.sleep(6)
1072 a8083063 Iustin Pop
      continue
1073 a8083063 Iustin Pop
    retries = 0
1074 a8083063 Iustin Pop
    for i in range(len(rstats)):
1075 a8083063 Iustin Pop
      mstat = rstats[i]
1076 a8083063 Iustin Pop
      if mstat is None:
1077 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1078 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1079 a8083063 Iustin Pop
        continue
1080 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1081 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1082 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1083 a8083063 Iustin Pop
      if perc_done is not None:
1084 a8083063 Iustin Pop
        done = False
1085 a8083063 Iustin Pop
        if est_time is not None:
1086 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1087 a8083063 Iustin Pop
          max_time = est_time
1088 a8083063 Iustin Pop
        else:
1089 a8083063 Iustin Pop
          rem_time = "no time estimate"
1090 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1091 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1092 a8083063 Iustin Pop
    if done or oneshot:
1093 a8083063 Iustin Pop
      break
1094 a8083063 Iustin Pop
1095 a8083063 Iustin Pop
    if unlock:
1096 a8083063 Iustin Pop
      utils.Unlock('cmd')
1097 a8083063 Iustin Pop
    try:
1098 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1099 a8083063 Iustin Pop
    finally:
1100 a8083063 Iustin Pop
      if unlock:
1101 a8083063 Iustin Pop
        utils.Lock('cmd')
1102 a8083063 Iustin Pop
1103 a8083063 Iustin Pop
  if done:
1104 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1105 a8083063 Iustin Pop
  return not cumul_degraded
1106 a8083063 Iustin Pop
1107 a8083063 Iustin Pop
1108 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1109 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1110 a8083063 Iustin Pop

1111 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1112 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1113 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1114 0834c866 Iustin Pop

1115 a8083063 Iustin Pop
  """
1116 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1117 0834c866 Iustin Pop
  if ldisk:
1118 0834c866 Iustin Pop
    idx = 6
1119 0834c866 Iustin Pop
  else:
1120 0834c866 Iustin Pop
    idx = 5
1121 a8083063 Iustin Pop
1122 a8083063 Iustin Pop
  result = True
1123 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1124 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1125 a8083063 Iustin Pop
    if not rstats:
1126 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
1127 a8083063 Iustin Pop
      result = False
1128 a8083063 Iustin Pop
    else:
1129 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1130 a8083063 Iustin Pop
  if dev.children:
1131 a8083063 Iustin Pop
    for child in dev.children:
1132 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1133 a8083063 Iustin Pop
1134 a8083063 Iustin Pop
  return result
1135 a8083063 Iustin Pop
1136 a8083063 Iustin Pop
1137 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Returns the per-node OS diagnose data as gathered via RPC.

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    # identity test against the False sentinel (PEP 8), instead of the
    # previous '== False' equality comparison
    if node_data is False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data
1160 a8083063 Iustin Pop
1161 a8083063 Iustin Pop
1162 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  # the opcode must carry the name of the node to remove
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # run the hooks on every node except the one being removed
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # modern exception-call syntax, consistent with every other raise
      # in this module (the old "raise Exc, arg" form is deprecated)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still uses this node, either
    # as its primary or as one of its secondaries
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # store the canonical (expanded) node name back into the opcode
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    # stop the node daemon on the departing node
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields that require a live RPC call to the nodes to answer
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per node) of lists of field values, in
    the order requested by op.output_fields.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    # only issue the node_info RPC when a dynamic field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          live_data[name] = {}
    else:
      # give each node its own (independent) empty dict;
      # dict.fromkeys(nodenames, {}) would alias a single shared dict
      # object across all keys, a latent mutation hazard
      live_data = dict([(name, {}) for name in nodenames])

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # the instance->node maps are only needed for these fields
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          # missing live data (node down) yields None for the field
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # resolve op.nodes into the concrete list of node names to query
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    # "node" is the only field answerable without contacting the
    # nodes; the rest come from the live volume data
    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    # ask every wanted node for its volume list (mapping node -> data)
    volumes = rpc.call_node_volumes(nodenames)

    # full instance objects, needed to map LVs back to their owners
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # per-instance mapping as returned by MapLVsByNode (presumably
    # node -> LV names; defined on the instance object elsewhere)
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # skip nodes that did not answer or reported no volumes
      if node not in volumes or not volumes[node]:
        continue

      # sort a copy by physical device so output order is stable
      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            # size arrives as a string with a possible fraction part
            val = int(float(vol['size']))
          elif field == "instance":
            # search for the instance (if any) owning this LV on this
            # node; the for/else falls through to '-' when none does
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          # all values are stringified for output
          node_output.append(str(val))

        output.append(node_output)

    return output
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; also yields the canonical name and primary IP
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    # no secondary IP given means single-homed: secondary == primary
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # neither IP of the new node may clash with any IP of any
    # existing node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # the node object is only added to the config in Exec, after the
    # node has been fully set up
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

    # HVM clusters need the VNC password file present for copying
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
                                   constants.VNC_PASSWORD_FILE)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is interpolated into a shell command below, so it
    # must be restricted to this safe character set
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # give the freshly restarted daemon a moment to come up
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA key pairs plus the cluster user's key pair, read
    # locally and pushed to the new node via the node_add RPC
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _AddHostToEtcHosts(new_node.name)

    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    # dual-homed node: verify it really owns the secondary IP by
    # asking it to ping itself on that address
    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s."
                               " Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    # the master already has the up-to-date files locally
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in (constants.ETC_HOSTS, constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        # best-effort distribution: failures are logged, not fatal
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    # only now, with the node fully set up, is it added to the config
    self.cfg.AddNode(new_node)
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  # must be runnable on a non-master node, hence no master requirement
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    # the local host becomes the new master
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be."
                                 " %s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    # step 1: demote the old master (non-fatal on failure, the admin
    # is told to clean up manually)
    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # step 2: record the new master in the simple store and push the
    # updated file to every node
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    # step 3: promote the local node; again non-fatal, but the user is
    # warned via the feedback channel
    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,"
                  " please fix manually.")
class LUQueryClusterInfo(NoHooksLU):
  """Query basic cluster configuration data.

  """
  _OP_REQP = []
  # this query is read-only and may be answered by non-master nodes
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return a dict describing the cluster configuration.

    """
    return {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      }
1732 a8083063 Iustin Pop
1733 a8083063 Iustin Pop
1734 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy a file to some or all cluster nodes.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the source file exists and expands the requested
    node list.

    """
    fname = self.op.filename
    if not os.path.exists(fname):
      raise errors.OpPrereqError("No such filename '%s'" % fname)
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy the file from the master to the target nodes.

    The node this LU runs on is skipped, as the file already exists
    there; failures on individual nodes are logged but do not abort
    the remaining copies.

    """
    fname = self.op.filename
    local_name = utils.HostInfo().name

    for target in self.nodes:
      if target == local_name:
        continue
      if not ssh.CopyFileToNode(target, fname):
        logger.Error("Copy of file %s to node %s failed" % (fname, target))
1771 a8083063 Iustin Pop
1772 a8083063 Iustin Pop
1773 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Nothing to check for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the serialized form of the cluster configuration.

    """
    return self.cfg.DumpConfig()
1790 a8083063 Iustin Pop
1791 a8083063 Iustin Pop
1792 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    Expands and validates the requested node list.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run the command on each node in turn.

    Returns a list of (node, output, exit_code) tuples, one per node.

    """
    command = self.op.command
    results = []
    for target in self.nodes:
      ssh_result = ssh.SSHCall(target, "root", command)
      results.append((target, ssh_result.output, ssh_result.exit_code))
    return results
1816 a8083063 Iustin Pop
1817 a8083063 Iustin Pop
1818 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Activate the disks.

    Returns the per-device mapping information produced by
    _AssembleInstanceDisks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
1847 a8083063 Iustin Pop
1848 a8083063 Iustin Pop
1849 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info) where disks_ok is False if the
    operation failed, and device_info is a list of
    (host, instance_visible_name, assemble_result) tuples with the
    mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        # secondary-node failures are only fatal when the caller did
        # not ask to ignore them
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # NOTE: 'result' here is the value left over from the last inner
    # iteration (the primary-node assemble result for this disk)
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1909 a8083063 Iustin Pop
1910 a8083063 Iustin Pop
1911 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  On assembly failure the already-assembled disks are shut back down
  and OpExecError is raised; when force is a false (but not None)
  value, a hint about '--force' is also logged.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back any partially-assembled devices before failing
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1923 fe7b0351 Michael Hanselmann
1924 fe7b0351 Michael Hanselmann
1925 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    Refuses to shut down the block devices while the instance is still
    running on its primary node.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list answer means the RPC to the node failed; use
    # isinstance rather than the non-idiomatic "type(x) is list"
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1960 a8083063 Iustin Pop
1961 a8083063 Iustin Pop
1962 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  (the return value stays True despite them); errors on any other node
  always make the function return False.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # count the failure unless it is on the primary node and the
        # caller asked to ignore primary-node errors
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1981 a8083063 Iustin Pop
1982 a8083063 Iustin Pop
1983 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Check that a node has at least `requested` MiB of free memory.

  Raises OpPrereqError if the node cannot be contacted, does not
  report a usable free-memory figure, or has less free memory than
  requested.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  info = rpc.call_node_info([node], cfg.GetVGName())
  if not info or not isinstance(info, dict):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                               " information" % (node,))

  free_mem = info[node].get('memory_free')
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem))

  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem))
2011 d4f16fd9 Iustin Pop
2012 d4f16fd9 Iustin Pop
2013 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    inst = self.instance
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(inst))
    hook_nodes = [self.sstore.GetMasterNode(), inst.primary_node]
    hook_nodes.extend(inst.secondary_nodes)
    return env, hook_nodes, hook_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its bridges
    exist and that the primary node has enough free memory for it.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # verify the bridges on the target nodes exist
    _CheckInstanceBridgesExist(inst)

    # make sure the primary node can hold the instance's memory
    _CheckNodeFreeMemory(self.cfg, inst.primary_node,
                         "starting instance %s" % inst.name,
                         inst.memory)

    self.instance = inst
    self.op.instance_name = inst.name

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    inst = self.instance
    extra_args = getattr(self.op, "extra_args", "")

    _StartInstanceDisks(self.cfg, inst, self.op.force)

    if not rpc.call_instance_start(inst.primary_node, inst, extra_args):
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(inst, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(inst.name)
2074 a8083063 Iustin Pop
2075 a8083063 Iustin Pop
2076 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the hypervisor on the
    primary node; a full reboot is emulated by a complete shutdown
    (instance plus disks) followed by a fresh start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # validate the requested reboot type before touching the instance
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                           constants.INSTANCE_REBOOT_HARD,
                           constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard: a single hypervisor-level reboot on the primary node
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, then start it
      # again; the ordering here is critical
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        # roll back the disk activation if the start failed
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2150 bf6929a2 Alexander Schreiber
2151 bf6929a2 Alexander Schreiber
2152 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    inst = self.instance
    hook_nodes = [self.sstore.GetMasterNode(), inst.primary_node]
    hook_nodes.extend(inst.secondary_nodes)
    return _BuildInstanceHookEnvByObject(inst), hook_nodes, hook_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    inst = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    A failed hypervisor shutdown is logged but does not abort: the
    instance is still marked down and its disks are released.

    """
    inst = self.instance
    if not rpc.call_instance_shutdown(inst.primary_node, inst):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(inst.name)
    _ShutdownInstanceDisks(inst, self.cfg)
2195 a8083063 Iustin Pop
2196 a8083063 Iustin Pop
2197 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and (when an OS change is requested) that the target OS is available
    on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check against the live state on the primary node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # fixed: this previously formatted self.op.pnode, an attribute
        # the opcode does not have, which turned this error path into
        # an AttributeError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally switches the instance's OS, then runs the OS create
    scripts on freshly-activated disks; the disks are shut down again
    even if the create scripts fail.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2274 fe7b0351 Michael Hanselmann
2275 fe7b0351 Michael Hanselmann
2276 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  The instance must be stopped; the rename updates both the cluster
  configuration and (best-effort) the OS-level name via the OS rename
  script.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    that the new name resolves and is not already used by another
    instance, and (unless ignore_ip is set) that the new IP is free.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # also verify it is not actually running on its primary node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list:
      # FIX: this previously referenced the undefined name
      # 'instance_name', raising NameError instead of OpPrereqError
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name)

    if not getattr(self.op, "ignore_ip", False):
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      # fping succeeding means something already answers on that IP
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    Renames it in the configuration first, then runs the OS rename
    script on the primary node; a script failure is only logged, since
    the configuration rename has already taken effect.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # FIX: message used to read "Could run", missing the "not"
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2357 decd5f45 Iustin Pop
2358 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks node list contains only the master node.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks, and finally drops it
    from the cluster configuration.  With ignore_failures set, shutdown
    and disk-removal failures are reported but do not abort the removal.

    """
    instance = self.instance
    pnode = instance.primary_node
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, pnode))

    shutdown_ok = rpc.call_instance_shutdown(pnode, instance)
    if not shutdown_ok:
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, pnode))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % instance.name)

    disks_removed = _RemoveDisks(instance, self.cfg)
    if not disks_removed:
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2416 a8083063 Iustin Pop
2417 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  # required opcode parameters
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields whose values must be collected live from the nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    Returns a list of rows, one per wanted instance, each row holding
    the values of self.op.output_fields in order.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # only contact the nodes if a live (dynamic) field was requested
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result is False:
          # FIX: identity test instead of '== False' (which also
          # matched 0); False is the RPC failure marker
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          val = (instance.status != "down")
        elif field == "oper_state":
          # live state is unknown when the primary node did not answer
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "status":
          # combined admin + operational status
          if instance.primary_node in bad_nodes:
            val = "ERROR_nodedown"
          else:
            running = bool(live_data.get(instance.name))
            if running:
              if instance.status != "down":
                val = "running"
              else:
                val = "ERROR_up"
            else:
              if instance.status != "down":
                val = "ERROR_down"
              else:
                val = "ADMIN_down"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field in ("sda_size", "sdb_size"):
          # FIX: membership test instead of a chained 'or' comparison
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        elif field == "vcpus":
          val = instance.vcpus
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2532 a8083063 Iustin Pop
2533 a8083063 Iustin Pop
2534 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  Moves a network-mirrored instance from its primary node to its
  (single) secondary node by shutting it down on the primary and
  starting it on the secondary.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # NOTE(review): nl contains only master + secondaries, not the
    # primary, despite the docstring above — confirm intended
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a
    network-mirrored disk template, and that the target (secondary)
    node has enough free memory and the required bridges.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # failover only makes sense for disks mirrored across nodes
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      # a net-mirrored template without a secondary is a config bug
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    target_node = secondary_nodes[0]
    # check memory requirements on the secondary node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    Steps: verify disk consistency on the target, shut down the
    instance and its disks on the source, flip the primary node in the
    configuration, then activate disks and start the instance on the
    target.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency the shutdown failure is only logged,
      # e.g. when the source node is already dead
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      # roll back disk activation before failing
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2647 a8083063 Iustin Pop
2648 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  Every device in the tree is created unconditionally; children are
  created before their parent.  Returns True on success, False as soon
  as any creation fails.

  """
  # depth-first: children must exist before the parent device
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  if device.physical_id is None:
    # remember the id assigned by the node
    device.physical_id = new_id
  return True
2668 a8083063 Iustin Pop
2669 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  Devices that must exist on secondaries (and all their descendants)
  are created; other devices are only recursed into, keeping the same
  'force' value.  Returns True on success, False otherwise.

  """
  # once any ancestor requires creation on the secondary, the whole
  # subtree below it must be created as well
  if device.CreateOnSecondary():
    force = True

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create at this level
    return True

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  if device.physical_id is None:
    # remember the id assigned by the node
    device.physical_id = new_id
  return True
2696 a8083063 Iustin Pop
2697 a8083063 Iustin Pop
2698 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2699 923b1523 Iustin Pop
  """Generate a suitable LV name.
2700 923b1523 Iustin Pop

2701 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2702 923b1523 Iustin Pop

2703 923b1523 Iustin Pop
  """
2704 923b1523 Iustin Pop
  results = []
2705 923b1523 Iustin Pop
  for val in exts:
2706 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2707 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2708 923b1523 Iustin Pop
  return results
2709 923b1523 Iustin Pop
2710 923b1523 Iustin Pop
2711 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Build one drbd (7.x) device with its two LV children.

  Allocates a DRBD port from the cluster config and creates the data
  LV (of the requested size) plus the fixed 128MB metadata LV, then
  wraps both in a DRBD7 device mirrored between primary and secondary.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv])
2725 a8083063 Iustin Pop
2726 a8083063 Iustin Pop
2727 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Build one drbd8 device with its two LV children.

  Allocates a DRBD port from the cluster config and creates the data
  LV (of the requested size) plus the fixed 128MB metadata LV, then
  wraps both in a DRBD8 device mirrored between primary and secondary
  and exposed to the instance as iv_name.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
2742 a1f445d3 Iustin Pop
2743 7c0d6283 Michael Hanselmann
2744 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Builds the two instance disks (data 'sda' of disk_sz and swap 'sdb'
  of swap_sz) as objects.Disk trees matching template_name, and
  returns them as a list.  Raises ProgrammerError when the number of
  secondary nodes does not match the template or the template is
  unknown.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == constants.DT_DISKLESS:
    # no disks at all
    disks = []
  elif template_name == constants.DT_PLAIN:
    # two plain LVs on the primary node only
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == constants.DT_LOCAL_RAID1:
    # md RAID1 over two LV mirrors, all on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    # md over a drbd7 device mirrored to the single secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # drbd8 devices directly, no md layer needed
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2818 a8083063 Iustin Pop
2819 a8083063 Iustin Pop
2820 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2821 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2822 3ecf6786 Iustin Pop

2823 3ecf6786 Iustin Pop
  """
2824 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2825 a0c3fea1 Michael Hanselmann
2826 a0c3fea1 Michael Hanselmann
2827 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create every block device backing an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info_text = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    # create on all secondaries first; abort on the first failure
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, snode, instance,
                                        disk, False, info_text):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    # only then create the device on the primary node
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, disk, info_text):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False
  return True
2858 a8083063 Iustin Pop
2859 a8083063 Iustin Pop
2860 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  result = True
  for device in instance.disks:
    # walk the full device tree so each child is removed on the node
    # where it actually lives
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        # best-effort: remember the failure but keep removing the rest
        result = False
  return result
2887 a8083063 Iustin Pop
2888 a8083063 Iustin Pop
2889 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Validates all the creation parameters in CheckPrereq (mode, source
  export, nodes, disk space, OS, name/IP/MAC, bridge, boot order) and
  in Exec generates the disks, registers the instance in the config,
  waits for mirror sync, runs the OS create/import scripts and
  optionally starts the instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Raises:
      errors.OpPrereqError: on any invalid or conflicting parameter

    """
    # optional parameters default to None if not supplied
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: None,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    # Check lv size requirements
    if req_size is not None:
      nodenames = [pnode.name] + self.secondaries
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          # FIX: report the offending node name, not the whole result dict
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    if self.op.kernel_path == constants.VALUE_NONE:
      raise errors.OpPrereqError("Can't set instance kernel to none")

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # ip can be None/"none" (no IP), "auto" (resolve from name) or explicit
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means the address is already taken by someone else
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # boot order verification
    if self.op.hvm_boot_order is not None:
      # stripping all valid characters must leave nothing behind
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
        raise errors.OpPrereqError("invalid boot order specified,"
                                   " must be one or more of [acdn]")

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Raises:
      errors.OpExecError: if disk creation, sync, OS setup or instance
        start fails (disks are rolled back on creation/sync failure)

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisors need a network port reserved (e.g. for VNC consoles)
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disks and the config entry
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3209 a8083063 Iustin Pop
3210 a8083063 Iustin Pop
3211 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    pnode = instance.primary_node

    # the instance must be actually running on its primary node
    running_insts = rpc.call_instance_list([pnode])[pnode]
    if running_insts is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if instance.name not in running_insts:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(
      instance)
    # build ssh cmdline
    argv = (["ssh", "-q", "-t"] +
            list(ssh.KNOWN_HOSTS_OPTS) +
            list(ssh.BATCH_MODE_OPTS) +
            [pnode, console_cmd])
    return "ssh", argv
3259 a8083063 Iustin Pop
3260 a8083063 Iustin Pop
3261 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Creates a fresh DRBD branch on the given remote node and attaches it
  as a new child of the instance's md device, rolling back the created
  volumes if any step fails.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the named disk; the for/else fires when no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave devices."
                                 " This would create a 3-disk raid1 which we"
                                 " don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component

    Raises:
      errors.OpExecError: if the new component can't be created or
        attached; partially created volumes are removed first

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # FIX: typo in log message ("compoment" -> "component")
      logger.Error("Can't add mirror component to md!")
      # roll back: try to remove the new device on both nodes
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3373 a8083063 Iustin Pop
3374 a8083063 Iustin Pop
3375 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove one component from a remote_raid1 (md-over-drbd7) disk.

  The component is identified by the disk's iv_name plus the drbd port
  number stored in the child's logical_id.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses the
    remote_raid1 template, and that the named disk actually has a
    removable child matching the given port.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")

    # locate the md device by its iv_name
    target = None
    for candidate in instance.disks:
      if candidate.iv_name == self.op.disk_name:
        target = candidate
        break
    if target is None:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)

    # locate the drbd7 child that carries the requested port number
    match = None
    for candidate in target.children:
      if (candidate.dev_type == constants.LD_DRBD7 and
          candidate.logical_id[2] == self.op.disk_id):
        match = candidate
        break
    if match is None:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(target.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = target
    self.child = match
    # logical_id[0:2] are the two node names; the one that is not the
    # primary is the old secondary
    if match.logical_id[0] == instance.primary_node:
      self.old_secondary = match.logical_id[1]
    else:
      self.old_secondary = match.logical_id[0]

  def Exec(self, feedback_fn):
    """Remove the mirror component.

    """
    inst = self.instance
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(self.disk, inst.primary_node)
    if not rpc.call_blockdev_removechildren(inst.primary_node,
                                            self.disk, [self.child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # best-effort removal of the now-detached device on both nodes
    for node in self.child.logical_id[:2]:
      self.cfg.SetDiskID(self.child, node)
      if not rpc.call_blockdev_remove(node, self.child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    self.disk.children.remove(self.child)
    self.cfg.AddInstance(inst)
3460 a8083063 Iustin Pop
3461 a8083063 Iustin Pop
3462 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3463 a8083063 Iustin Pop
  """Replace the disks of an instance.
3464 a8083063 Iustin Pop

3465 a8083063 Iustin Pop
  """
3466 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3467 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3468 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3469 a8083063 Iustin Pop
3470 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    static_env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # per-instance variables are merged in last, so they win on any
    # key collision
    static_env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return static_env, node_list, node_list
3489 a8083063 Iustin Pop
3490 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and computes the
    target/other node attributes for the requested replacement mode.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # normalize the (optional) replacement node name
    repl_node = getattr(self.op, "remote_node", None)
    if repl_node is None:
      self.remote_node_info = None
    else:
      repl_node = self.cfg.ExpandNodeName(repl_node)
      if repl_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(repl_node)

    if repl_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif repl_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # DRBD8 cannot rebuild the secondary in place (no different
        # port would be allocated), unlike drbd7
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the current secondary was given: fall back to the drbd7
      # 'no-replace-secondary' behaviour
      repl_node = None

    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")

    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          repl_node is not None):
        # 'replace all' plus a new node really means: replace secondary
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if repl_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        # a None new_node means: keep the current secondary
        self.new_node = repl_node
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))

    self.op.remote_node = repl_node
3570 a8083063 Iustin Pop
3571 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of a remote_raid1 instance.

    For each disk a new drbd component is created on the (possibly new)
    secondary and on the primary, attached to the md device, and once
    the mirrors have synced the old component is detached and deleted
    from both of its nodes.

    Args:
      feedback_fn: unused here; part of the common Exec helper signature

    Raises:
      errors.OpExecError: on device creation or md manipulation failure;
        earlier partial work is only rolled back on a best-effort basis

    """
    instance = self.instance
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      # remember (md device, old component, new component) for later
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on secondary"
                                 " node %s. Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!"
                                 " Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        logger.Error("Can't add mirror component to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, child, new_drbd) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # all mirrors are healthy: drop and delete the old components
    for name, (dev, child, new_drbd) in iv_names.iteritems():
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3665 a8083063 Iustin Pop
3666 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    Args:
      feedback_fn: unused here; part of the common Exec helper signature

    Raises:
      errors.OpExecError: on any failed step; cleanup of already-created
        LVs is best-effort only

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = vgname
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if dev.iv_name not in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if dev.iv_name not in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node == instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if dev.iv_name not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # the new LVs now carry the old names/IDs...
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      # ...and the old LVs carry the _replaced suffix
      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # the %s placeholder was previously left unfilled (the second
            # argument went into the hint keyword), so name the LV here
            warning("Can't rollback device %s" % new_lv.logical_id[1],
                    hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
3835 a9e0c397 Iustin Pop
3836 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    # NOTE(review): 'vgname' is assigned but never read below ('my_vg' is
    # used instead); looks like dead code — confirm before removing
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    # the VG must exist on both the primary and the future secondary
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # only the disks selected in the opcode are checked/replaced
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      # ldisk=True: the primary's local disk itself must be healthy,
      # since it will be the only good copy during the move
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      size = dev.size
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      # remember the drbd device and its backing LVs for steps 5/6
      iv_names[dev.iv_name] = (dev, dev.children)

    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev in instance.disks:
      size = dev.size
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                      _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      # best-effort: a failure here only leaves a stale device behind
      if not rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the physical (unique in bdev terms) id to None, meaning
      # detach from network
      dev.physical_id = (None,) * len(dev.physical_id)
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev in instance.disks:
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
      cfg.SetDiskID(dev, pri_node)
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    # NOTE(review): 'failures' is never appended to or read — apparently
    # leftover from an earlier revision
    failures = []
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                "please do a gnt-instance info to see the status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      # index 5 of the blockdev_find result is the 'degraded' flag
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # best-effort removal; leftovers are only wasted space
        if not rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
3999 a9e0c397 Iustin Pop
4000 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Selects the handler matching the instance's disk template (and, for
    drbd8, whether a new secondary node was requested) and runs it.

    """
    template = self.instance.disk_template
    if template == constants.DT_DRBD8:
      # no remote node given: replace disks in place; otherwise move
      # the secondary to the requested node
      if self.op.remote_node is None:
        handler = self._ExecD8DiskOnly
      else:
        handler = self._ExecD8Secondary
    elif template == constants.DT_REMOTE_RAID1:
      handler = self._ExecRR1
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return handler(feedback_fn)
4017 a9e0c397 Iustin Pop
4018 a8083063 Iustin Pop
4019 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns, per instance, its static configuration plus the live
  (RPC-queried) state of the instance and of each of its block devices.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      # explicit list given: expand and validate every name
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        self.wanted_instances.append(instance)
    else:
      # empty list means "all instances"
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Queries the primary (and, if any, secondary) node for the device's
    live state and recurses into child devices; returns a dict with the
    device's ids, both statuses and the children's dicts.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      # no secondary node (e.g. plain LV): no secondary status
      dev_sstatus = None

    if dev.children:
      # recurse, passing down the (possibly updated) secondary node
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      # live state: the instance is "up" if the primary node reports it
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      # configured (desired) state from the cluster configuration
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "network_port": instance.network_port,
        "vcpus": instance.vcpus,
        "kernel_path": instance.kernel_path,
        "initrd_path": instance.initrd_path,
        "hvm_boot_order": instance.hvm_boot_order,
        }

      result[instance.name] = idict

    return result
4122 a8083063 Iustin Pop
4123 a8083063 Iustin Pop
4124 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
  """Modifies an instances's parameters.

  Validates the requested changes in CheckPrereq, applies them to the
  configuration in Exec; changes take effect at the next instance restart.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # only pass to the hooks the parameters that are actually changing
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge or self.mac:
      # a NIC override needs the full (ip, bridge, mac) triple, so fill
      # unchanged fields from the instance's first NIC
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # all parameters are optional on the opcode; missing ones become None
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order]
    # reject a no-op request: at least one parameter must be given
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" (any case) clears the IP
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      # must be both unique in the cluster and syntactically valid
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      # unlike initrd, an instance cannot run with *no* kernel
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      # "none" and "default" are accepted as-is; anything else must be
      # an absolute path
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        # the order may only consist of the device letters a, c, d, n
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" %
                                 self.op.instance_name)
    # canonicalize the name for the rest of the LU
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # collect (name, new_value) pairs for the changes actually applied
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      # do_ip (not truthiness) is used here since self.ip may be None
      # when the user asked to clear the address
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # "default" is stored as None in the configuration
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))

    # write the modified instance back to the cluster configuration
    self.cfg.AddInstance(instance)

    return result
4284 a8083063 Iustin Pop
4285 a8083063 Iustin Pop
4286 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Resolve and validate the (optional) node list.

    An absent or empty node list means "all nodes".

    """
    wanted = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, wanted)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4308 a8083063 Iustin Pop
4309 a8083063 Iustin Pop
4310 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  Snapshots the instance's 'sda' disk (optionally shutting the instance
  down around the snapshot), copies the snapshot to the target node,
  finalizes the export there and removes any older exports of the same
  instance from other nodes.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # canonicalize the target node name
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    # the try/finally guarantees the instance is restarted even if
    # snapshotting fails
    try:
      for disk in instance.disks:
        # only the 'sda' disk is exported
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # best-effort: log and continue without this snapshot
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart only if we were the ones who shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    for dev in snap_disks:
      # copy each snapshot to the destination, then drop the snapshot LV
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          # keep only the freshly-created export; drop stale copies
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4422 5c947f38 Iustin Pop
4423 5c947f38 Iustin Pop
4424 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  Abstract parent of all the other tags LUs: it resolves
  self.op.kind/self.op.name into self.target for its subclasses.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves the operation target (cluster, node or instance) and
    stores it in self.target; node/instance names are expanded and
    written back into self.op.name.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      # unknown tag kind: refuse the operation outright
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4453 5c947f38 Iustin Pop
4454 5c947f38 Iustin Pop
4455 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Return the tag list of the target resolved by TagsLU.CheckPrereq.

    """
    tags = self.target.GetTags()
    return tags
4466 5c947f38 Iustin Pop
4467 5c947f38 Iustin Pop
4468 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4469 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4470 73415719 Iustin Pop

4471 73415719 Iustin Pop
  """
4472 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4473 73415719 Iustin Pop
4474 73415719 Iustin Pop
  def CheckPrereq(self):
4475 73415719 Iustin Pop
    """Check prerequisites.
4476 73415719 Iustin Pop

4477 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4478 73415719 Iustin Pop

4479 73415719 Iustin Pop
    """
4480 73415719 Iustin Pop
    try:
4481 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4482 73415719 Iustin Pop
    except re.error, err:
4483 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4484 73415719 Iustin Pop
                                 (self.op.pattern, err))
4485 73415719 Iustin Pop
4486 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4487 73415719 Iustin Pop
    """Returns the tag list.
4488 73415719 Iustin Pop

4489 73415719 Iustin Pop
    """
4490 73415719 Iustin Pop
    cfg = self.cfg
4491 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4492 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4493 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4494 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4495 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4496 73415719 Iustin Pop
    results = []
4497 73415719 Iustin Pop
    for path, target in tgts:
4498 73415719 Iustin Pop
      for tag in target.GetTags():
4499 73415719 Iustin Pop
        if self.re.search(tag):
4500 73415719 Iustin Pop
          results.append((path, tag))
4501 73415719 Iustin Pop
    return results
4502 73415719 Iustin Pop
4503 73415719 Iustin Pop
4504 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4505 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4506 5c947f38 Iustin Pop

4507 5c947f38 Iustin Pop
  """
4508 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4509 5c947f38 Iustin Pop
4510 5c947f38 Iustin Pop
  def CheckPrereq(self):
4511 5c947f38 Iustin Pop
    """Check prerequisites.
4512 5c947f38 Iustin Pop

4513 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4514 5c947f38 Iustin Pop

4515 5c947f38 Iustin Pop
    """
4516 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4517 f27302fa Iustin Pop
    for tag in self.op.tags:
4518 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4519 5c947f38 Iustin Pop
4520 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4521 5c947f38 Iustin Pop
    """Sets the tag.
4522 5c947f38 Iustin Pop

4523 5c947f38 Iustin Pop
    """
4524 5c947f38 Iustin Pop
    try:
4525 f27302fa Iustin Pop
      for tag in self.op.tags:
4526 f27302fa Iustin Pop
        self.target.AddTag(tag)
4527 5c947f38 Iustin Pop
    except errors.TagError, err:
4528 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4529 5c947f38 Iustin Pop
    try:
4530 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4531 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4532 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4533 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4534 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4535 5c947f38 Iustin Pop
4536 5c947f38 Iustin Pop
4537 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    # every requested tag must currently be present on the target
    wanted = frozenset(self.op.tags)
    held = self.target.GetTags()
    missing = wanted - held
    if missing:
      names = ["'%s'" % del_tag for del_tag in missing]
      names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    # persist the modified target; a concurrent config change turns
    # into a retryable error for the caller
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4573 06009e27 Iustin Pop
4574 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have a good list of nodes and/or the duration
    is valid.

    """
    if self.op.on_nodes:
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    """
    # delay on the master node itself, if requested
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if not self.op.on_nodes:
      return
    # fan the delay out to the requested nodes via RPC
    rpc_results = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
    if not rpc_results:
      raise errors.OpExecError("Complete failure from rpc call")
    for node_name, status in rpc_results.items():
      if not status:
        raise errors.OpExecError("Failure during rpc call to node %s,"
                                 " result: %s" % (node_name, status))