Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ d6646186

History | View | Annotate | Download (153.2 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  # Hooks path (relative directory name) for this LU; None means no hooks.
  HPATH = None
  # Hooks object type (cluster/node/instance); None means no hooks.
  HTYPE = None
  # Names of opcode attributes that must be present (non-None) at init time.
  _OP_REQP = []
  # Whether this LU requires an already-initialized cluster configuration.
  REQ_CLUSTER = True
  # Whether this LU may only run on the cluster's master node.
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Validates the required opcode parameters and, depending on the
    REQ_CLUSTER/REQ_MASTER class flags, that the cluster exists and
    that we are running on the master node.

    Args:
      processor: the processor object driving this LU's execution
      op: the opcode (request) this LU will carry out
      cfg: the cluster configuration accessor
      sstore: the simple-store (ssconf) accessor

    Raises:
      errors.OpPrereqError: on missing parameters or wrong host/cluster state

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    # Fail early if any declared-required opcode parameter is missing.
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        # Compare the configured master against this host's resolved name.
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    Args:
      feedback_fn: callable used to report progress back to the caller

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in the
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Convenience base class for LUs that never trigger hooks.

  Deriving from this class instead of LogicalUnit removes the need for
  each hook-less LU to provide its own empty hooks configuration.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Return an empty hooks configuration.

    Since this LU runs no hooks, the environment is an empty dict and
    both the pre- and post-execution node lists are empty.

    """
    env = {}
    pre_nodes = []
    post_nodes = []
    return env, pre_nodes, post_nodes
164 a8083063 Iustin Pop
165 a8083063 Iustin Pop
166 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
  """Add an /etc/hosts entry (FQDN plus short alias) for the given host.

  Thin convenience wrapper around utils.SetEtcHostsEntry.

  Args:
    hostname: name of the host to resolve and register

  """
  host_info = utils.HostInfo(name=hostname)
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, host_info.ip, host_info.name,
                         [host_info.ShortName()])
172 9440aeab Michael Hanselmann
173 9440aeab Michael Hanselmann
174 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Remove both the FQDN and the short-name /etc/hosts entries of a host.

  Thin convenience wrapper around utils.RemoveEtcHostsEntry.

  Args:
    hostname: name of the host whose entries should be removed

  """
  host_info = utils.HostInfo(name=hostname)
  # Remove the fully-qualified entry first, then the short alias.
  for entry in (host_info.name, host_info.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, entry)
181 c8a0948f Michael Hanselmann
182 c8a0948f Michael Hanselmann
183 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Return the checked and fully-expanded list of node names.

  Args:
    lu: the calling LogicalUnit (used for its cfg accessor)
    nodes: list of node names (strings), or a false value for all nodes

  Returns:
    A NiceSort-ed list of expanded node names.

  Raises:
    errors.OpPrereqError: if nodes is not a list or a name is unknown

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  # An empty/None list means "all nodes in the cluster".
  if not nodes:
    return utils.NiceSort(lu.cfg.GetNodeList())

  wanted = []
  for name in nodes:
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
205 3312b702 Iustin Pop
206 3312b702 Iustin Pop
207 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Return the checked and fully-expanded list of instance names.

  Args:
    lu: the calling LogicalUnit (used for its cfg accessor)
    instances: list of instance names (strings), or a false value for all

  Returns:
    A NiceSort-ed list of expanded instance names.

  Raises:
    errors.OpPrereqError: if instances is not a list or a name is unknown

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # An empty/None list means "all instances in the cluster".
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
229 dcb93971 Michael Hanselmann
230 dcb93971 Michael Hanselmann
231 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
232 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
233 83120a01 Michael Hanselmann

234 83120a01 Michael Hanselmann
  Args:
235 83120a01 Michael Hanselmann
    static: Static fields
236 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
237 83120a01 Michael Hanselmann

238 83120a01 Michael Hanselmann
  """
239 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
240 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
241 dcb93971 Michael Hanselmann
242 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
243 dcb93971 Michael Hanselmann
244 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
245 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
246 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
247 3ecf6786 Iustin Pop
                                          difference(all_fields)))
248 dcb93971 Michael Hanselmann
249 dcb93971 Michael Hanselmann
250 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
251 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
252 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
253 ecb215b5 Michael Hanselmann

254 ecb215b5 Michael Hanselmann
  Args:
255 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
256 396e1b78 Michael Hanselmann
  """
257 396e1b78 Michael Hanselmann
  env = {
258 0e137c28 Iustin Pop
    "OP_TARGET": name,
259 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
260 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
261 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
262 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
263 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
264 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
265 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
266 396e1b78 Michael Hanselmann
  }
267 396e1b78 Michael Hanselmann
268 396e1b78 Michael Hanselmann
  if nics:
269 396e1b78 Michael Hanselmann
    nic_count = len(nics)
270 53e4e875 Guido Trotter
    for idx, (ip, bridge, mac) in enumerate(nics):
271 396e1b78 Michael Hanselmann
      if ip is None:
272 396e1b78 Michael Hanselmann
        ip = ""
273 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
274 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
275 53e4e875 Guido Trotter
      env["INSTANCE_NIC%d_HWADDR" % idx] = mac
276 396e1b78 Michael Hanselmann
  else:
277 396e1b78 Michael Hanselmann
    nic_count = 0
278 396e1b78 Michael Hanselmann
279 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
280 396e1b78 Michael Hanselmann
281 396e1b78 Michael Hanselmann
  return env
282 396e1b78 Michael Hanselmann
283 396e1b78 Michael Hanselmann
284 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override the computed arguments with

  Returns:
    dict of hook environment variables (see _BuildInstanceHookEnv).

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this used to read "instance.os" (a copy-paste of the
    # os_type line above), so INSTANCE_STATUS wrongly exported the OS
    # name instead of the instance's run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge, nic.mac) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
304 396e1b78 Michael Hanselmann
305 396e1b78 Michael Hanselmann
306 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Scans the cluster-wide known_hosts file; lines matching the node's
  name/IP with the correct key are kept, matching lines with a stale
  key are dropped, and a fresh "name,ip ssh-rsa key" line is added if
  no fully-correct entry was found.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open read/write if the file exists, otherwise create it.
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  # True once a line with all our names and the right key has been seen.
  inthere = False

  save_lines = []   # lines to keep from the existing file
  add_lines = []    # new line(s) to append for this node
  removed = False   # True if any stale line for this node was dropped

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    # (fewer than 3 fields, or comment lines starting with '#'; note the
    # len(parts) >= 3 check also guards the [0] index on blank lines)
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      fields = parts[0].split(',')   # hostnames/IPs on this line
      key = parts[2]                 # the base64 key material

      # Determine whether this line names all / any of our identifiers.
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # Fully correct entry: keep it and remember we don't need to add.
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # Partially matching or stale-key entry: drop it.
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # (write-to-temp plus rename keeps the replacement atomic)
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
383 a8083063 Iustin Pop
384 a8083063 Iustin Pop
385 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
386 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
387 a8083063 Iustin Pop

388 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
389 a8083063 Iustin Pop
  is the error message.
390 a8083063 Iustin Pop

391 a8083063 Iustin Pop
  """
392 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
393 a8083063 Iustin Pop
  if vgsize is None:
394 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
395 a8083063 Iustin Pop
  elif vgsize < 20480:
396 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
397 191a8385 Guido Trotter
            (vgname, vgsize))
398 a8083063 Iustin Pop
  return None
399 a8083063 Iustin Pop
400 a8083063 Iustin Pop
401 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Set up the SSH configuration for the cluster.

  Generates a fresh DSA keypair for the Ganeti run-as user and adds the
  new public key to that user's authorized keys; any pre-existing key
  files are backed up before being removed.

  Args:
    node: the name of this host as a fqdn

  Raises:
    errors.OpExecError: if the ssh-keygen invocation fails

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Preserve a backup of any existing key material, then clear it out.
  for key_file in (priv_key, pub_key):
    if os.path.exists(key_file):
      utils.CreateBackup(key_file)
    utils.RemoveFile(key_file)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  pub_fd = open(pub_key, 'r')
  try:
    # 8 KiB is far more than any single public-key line needs.
    utils.AddAuthorizedKey(auth_keys, pub_fd.read(8192))
  finally:
    pub_fd.close()
431 a8083063 Iustin Pop
432 a8083063 Iustin Pop
433 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: a writable ssconf.SimpleStore instance

  Raises:
    errors.OpExecError: if certificate generation or the node daemon
      restart fails

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Generate a self-signed RSA certificate, valid for five years, with
  # key and certificate in the same file.
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # The file holds the private key too, so owner-read-only.
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # Restart the node daemon so it picks up the new password/certificate.
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
462 a8083063 Iustin Pop
463 a8083063 Iustin Pop
464 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that all bridges needed by an instance exist.

  Queries the instance's primary node for the presence of every bridge
  referenced by the instance's NICs.

  Args:
    instance: the objects.Instance to check

  Raises:
    errors.OpPrereqError: if any bridge is missing on the primary node

  """
  brlist = [nic.bridge for nic in instance.nics]
  bridges_ok = rpc.call_bridges_exist(instance.primary_node, brlist)
  if not bridges_ok:
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))
474 bf6929a2 Alexander Schreiber
475 bf6929a2 Alexander Schreiber
476 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Runs on a bare host: validates the host/cluster names, network
  devices and volume group, then writes the initial simple-store keys
  and cluster configuration, starts the master IP and sets up SSH.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  # There is no cluster yet, so the usual cluster/master checks are skipped.
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    # self.hostname is filled in by CheckPrereq, which runs first.
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Also validates the hypervisor type, host/cluster name resolution,
    optional secondary IP, volume group, MAC prefix, master network
    device and the presence of the node init.d script.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    # HVM needs a pre-seeded VNC password file before init can proceed.
    if self.op.hypervisor_type == constants.HT_XEN_HVM31:
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        # NOTE(review): the implicit concatenation below yields
        # "...cluster VNCpassword file..." -- a space is missing at the
        # end of the first string literal; left unchanged here.
        raise errors.OpPrereqError("Please prepare the cluster VNC"
                                   "password file %s" %
                                   constants.VNC_PASSWORD_FILE)

    # Resolve our own name/IP; kept on self for BuildHooksEnv and Exec.
    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # Our resolved IP must actually be reachable on this host.
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    # The secondary IP (replication network) is optional; if given and
    # different from the primary, it must also belong to this host.
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    # MAC prefix must be three lowercase-hex octet pairs (aa:bb:cc).
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    # The master netdev must exist ("ip link show" succeeding is the test).
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Writes the simple store, generates the node daemon credentials,
    starts the master IP, sets up SSH/known_hosts/etc.hosts entries and
    finally writes the initial cluster configuration.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # Second whitespace-separated field of the host key file is the key.
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
603 a8083063 Iustin Pop
604 a8083063 Iustin Pop
605 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  Only allowed once every node except the master and every instance
  have already been removed.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodes = self.cfg.GetNodeList()
    remaining = len(nodes) - 1
    if remaining != 0 or nodes[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % remaining)
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Backs up the cluster ssh keys and tells the master node to leave
    the (now single-node) cluster.

    """
    master = self.sstore.GetMasterNode()
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # keep a backup of the keys before they are removed by the node
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    rpc.call_node_leave_cluster(master)
639 a8083063 Iustin Pop
640 a8083063 Iustin Pop
641 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned by the node
      node_result: results of the 'node verify' rpc for this node
      remote_version: protocol version reported by the node
      feedback_fn: function used to report errors

    Returns:
      True if the node had problems, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # note: use the same "  - ERROR" prefix as all other messages
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a distinct name so we don't shadow the 'node' parameter,
        # which is still needed by callers of feedback_fn below
        for remote_node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (remote_node, node_result['nodelist'][remote_node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, that the instance is running
    where it should be (primary node) and nowhere else.

    Returns:
      True if the instance had problems, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if orphan volumes were found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if orphan instances were found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns:
      0 if no problems were found, 1 otherwise (suitable as an exit
      code for the 'gnt-cluster verify' command).

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if isinstance(volumeinfo, basestring):
        # a string result means an LVM error on the node; record it but
        # continue verifying the node's instance list
        feedback_fn("  - ERROR: LVM problem on node %s: %s" %
                    (node, volumeinfo[-400:].encode('string_escape')))
        bad = True
        node_volume[node] = {}
      elif not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue
      else:
        node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
888 a8083063 Iustin Pop
889 a8083063 Iustin Pop
890 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns:
      A tuple of four elements:
        - list of nodes that could not be contacted
        - dict of per-node LVM enumeration errors (node name -> message)
        - list of instance names with offline logical volumes
        - dict of instances with missing LVs (name -> list of (node, lv))

    """
    result = res_nodes, res_nlvm, res_instances, res_missing = [], {}, [], {}

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    # only network-mirrored, running instances are interesting
    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if isinstance(lvs, basestring):
        logger.Info("error enumerating LVs on node %s: %s" % (node, lvs))
        res_nlvm[node] = lvs
        # a string result means we have no LV data for this node; without
        # this continue we would call iteritems() on a string below
        continue
      elif not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, _, lv_online) in lvs.iteritems():
        inst = nv_dict.pop((node, lv_name), None)
        if (not lv_online and inst is not None
            and inst.name not in res_instances):
          res_instances.append(inst.name)

    # any leftover items in nv_dict are missing LVs, let's arrange the
    # data better
    for key, inst in nv_dict.iteritems():
      if inst.name not in res_missing:
        res_missing[inst.name] = []
      res_missing[inst.name].append(key)

    return result
960 2c95a8d4 Iustin Pop
961 2c95a8d4 Iustin Pop
962 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, both before and after the
    rename.

    """
    env = {
      # bugfix: the sstore lives on the LU (self.sstore), not on the
      # opcode; self.op.sstore raised AttributeError
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    The new name must resolve, and either the name or the IP address
    must actually change; a changed IP must not be already reachable.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # fping returns 0 (success) when the address answers, which for a
      # new master IP means it is already taken
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restart the master IP, even if the rename failed
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1039 07bd8a51 Iustin Pop
1040 07bd8a51 Iustin Pop
1041 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the node-specific disk IDs
      before querying mirror status
    instance: the instance whose disks are being watched
    proc: processor-like object providing LogInfo/LogWarning feedback
    oneshot: if True, poll and report only once instead of waiting for
      the sync to complete
    unlock: if True, release the 'cmd' lock while sleeping between
      polls and re-acquire it afterwards

  Returns:
    True if the disks are in sync (not cumulatively degraded), False
    otherwise.

  Raises:
    errors.RemoteError: if the primary node fails to return mirror
      data ten times in a row.

  """
  if not instance.disks:
    # nothing to sync
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  # mirror status is always queried on the primary node
  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      # transient failure: retry up to 10 times before giving up
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # a degraded device with no sync in progress counts as a failure
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means the resync is still running
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # NOTE(review): if no device reported an estimate, max_time stays 0
    # and the sleep below is a no-op, making this a tight poll loop
    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1103 a8083063 Iustin Pop
1104 a8083063 Iustin Pop
1105 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  Args:
    cfgw: configuration object, used to set the node-specific disk ID
    dev: the disk (block device) object to check
    node: the node on which to check the device
    on_primary: whether the device is checked on its primary node
    ldisk: if True, test index 6 (ldisk) of the blockdev_find result
      instead of index 5 (is_degraded)

  Returns:
    True if the device (and, recursively, its children) is consistent,
    False otherwise.

  """
  cfgw.SetDiskID(dev, node)
  if ldisk:
    idx = 6
  else:
    idx = 5

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      result = False
    else:
      result = result and (not rstats[idx])
  if dev.children:
    for child in dev.children:
      # bugfix: propagate ldisk to the children; previously they were
      # always checked with the default is_degraded test
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary,
                                                ldisk=ldisk)

  return result
1132 a8083063 Iustin Pop
1133 a8083063 Iustin Pop
1134 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Returns:
      The node_data as returned by rpc.call_os_diagnose.

    Raises:
      errors.OpExecError: if the OS list could not be gathered.

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    # identity check against the False sentinel (PEP 8); an equality
    # test could also match other falsy return values
    if node_data is False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data
1157 a8083063 Iustin Pop
1158 a8083063 Iustin Pop
1159 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allows itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # new-style raise, consistent with the rest of this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
1232 c8a0948f Michael Hanselmann
1233 a8083063 Iustin Pop
1234 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields whose values must be fetched live from the nodes via RPC
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per queried node, each row holding the
    values of self.op.output_fields in order.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field was requested, so query the nodes
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # node did not answer; dynamic fields will resolve to None
          live_data[name] = {}
    else:
      # give each node its own empty dict; the previous
      # dict.fromkeys(nodenames, {}) made every entry alias a single
      # shared mutable dictionary
      live_data = dict([(name, {}) for name in nodenames])

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      # only walk the instance list when instance fields were requested
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          # missing live value (node down) shows up as None
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1330 a8083063 Iustin Pop
1331 a8083063 Iustin Pop
1332 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    volumes = rpc.call_node_volumes(self.nodes)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    # per-instance map of node -> owned logical volumes
    lvmap = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    def _VolumeOwner(node_name, lv_name):
      # find the instance owning the given LV on the given node,
      # falling back to '-' when no instance claims it
      for inst in instances:
        if node_name not in lvmap[inst]:
          continue
        if lv_name in lvmap[inst][node_name]:
          return inst.name
      return '-'

    output = []
    for node_name in self.nodes:
      if node_name not in volumes or not volumes[node_name]:
        # node did not report any volumes (or did not answer)
        continue

      for vol in sorted(volumes[node_name], key=lambda vol: vol['dev']):
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node_name
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            val = _VolumeOwner(node_name, vol['name'])
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1400 dcb93971 Michael Hanselmann
1401 dcb93971 Michael Hanselmann
1402 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    # post-hooks also run on the node being added
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the node name; fills in the canonical name and primary IP
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # the secondary IP is optional; default to single-homed (== primary)
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # neither of the new node's IPs may collide with any existing node's
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    # the node object is created here but only committed to the
    # configuration at the end of Exec
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      # HVM clusters must have a VNC password file to copy to the new node
      if not os.path.exists(constants.VNC_PASSWORD_FILE):
        raise errors.OpPrereqError("Cluster VNC password file %s missing" %
                                   constants.VNC_PASSWORD_FILE)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is embedded in a shell command below, so restrict it
    # to a safe character set before use
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    # shell sequence: write the node password, feed the SSL cert through
    # a here-document, then restart the node daemon
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    # ask_key=True: this is the first ssh contact with the new node
    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # give the freshly restarted daemon time to come up before querying it
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host keys plus the cluster user's key pair; order must match the
    # positional arguments of call_node_add below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _AddHostToEtcHosts(new_node.name)

    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: verify it can actually reach its own declared
      # secondary IP, by asking its daemon to ping itself
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # the node's own idea of its hostname must agree with the resolver's
    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s."
                               " Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      # no need to upload files to ourselves
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # copy failures are logged but do not abort the node addition
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # copy the simple store files (and the VNC password for HVM clusters)
    to_copy = ss.GetFileList()
    if self.sstore.GetHypervisorType() == constants.HT_XEN_HVM31:
      to_copy.append(constants.VNC_PASSWORD_FILE)
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    # only now, after everything succeeded, commit the node to the config
    self.cfg.AddNode(new_node)
1627 a8083063 Iustin Pop
1628 a8083063 Iustin Pop
1629 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    hook_env = {}
    hook_env["OP_TARGET"] = self.new_master
    hook_env["NEW_MASTER"] = self.new_master
    hook_env["OLD_MASTER"] = self.old_master
    return hook_env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    new_master = utils.HostInfo().name
    old_master = self.sstore.GetMasterNode()

    if new_master == old_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be."
                                 " %s is already the master" %
                                 old_master)

    self.new_master = new_master
    self.old_master = old_master

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    # step 1: demote the old master (best-effort, only logged on failure)
    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # step 2: record the new master in the simple store and push the
    # updated file to every node
    sstore = self.sstore
    sstore.SetKey(sstore.SS_MASTER_NODE, self.new_master)
    master_file = sstore.KeyToFilename(sstore.SS_MASTER_NODE)
    if not rpc.call_upload_file(self.cfg.GetNodeList(), master_file):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    # step 3: promote ourselves; failure here needs manual intervention
    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,"
                  " please fix manually.")
1697 a8083063 Iustin Pop
1698 a8083063 Iustin Pop
1699 a8083063 Iustin Pop
1700 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    # static version/configuration information plus the current master
    return {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      }
1729 a8083063 Iustin Pop
1730 a8083063 Iustin Pop
1731 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Args:
      opts - class with options as members
      args - list containing a single element, the file name
    Opts used:
      nodes - list containing the name of target nodes; if empty, all nodes

    """
    fname = self.op.filename
    local_name = utils.HostInfo().name

    for target in self.nodes:
      # skip ourselves; the master already has the file
      if target != local_name:
        if not ssh.CopyFileToNode(target, fname):
          logger.Error("Copy of file %s to node %s failed" % (fname, target))
1768 a8083063 Iustin Pop
1769 a8083063 Iustin Pop
1770 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1771 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1772 a8083063 Iustin Pop

1773 a8083063 Iustin Pop
  """
1774 a8083063 Iustin Pop
  _OP_REQP = []
1775 a8083063 Iustin Pop
1776 a8083063 Iustin Pop
  def CheckPrereq(self):
1777 a8083063 Iustin Pop
    """No prerequisites.
1778 a8083063 Iustin Pop

1779 a8083063 Iustin Pop
    """
1780 a8083063 Iustin Pop
    pass
1781 a8083063 Iustin Pop
1782 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1783 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1784 a8083063 Iustin Pop

1785 a8083063 Iustin Pop
    """
1786 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1787 a8083063 Iustin Pop
1788 a8083063 Iustin Pop
1789 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
1790 a8083063 Iustin Pop
  """Run a command on some nodes.
1791 a8083063 Iustin Pop

1792 a8083063 Iustin Pop
  """
1793 a8083063 Iustin Pop
  _OP_REQP = ["command", "nodes"]
1794 a8083063 Iustin Pop
1795 a8083063 Iustin Pop
  def CheckPrereq(self):
1796 a8083063 Iustin Pop
    """Check prerequisites.
1797 a8083063 Iustin Pop

1798 a8083063 Iustin Pop
    It checks that the given list of nodes is valid.
1799 a8083063 Iustin Pop

1800 a8083063 Iustin Pop
    """
1801 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1802 a8083063 Iustin Pop
1803 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1804 a8083063 Iustin Pop
    """Run a command on some nodes.
1805 a8083063 Iustin Pop

1806 a8083063 Iustin Pop
    """
1807 a8083063 Iustin Pop
    data = []
1808 a8083063 Iustin Pop
    for node in self.nodes:
1809 a7ba5e53 Iustin Pop
      result = ssh.SSHCall(node, "root", self.op.command)
1810 a7ba5e53 Iustin Pop
      data.append((node, result.output, result.exit_code))
1811 a8083063 Iustin Pop
1812 a8083063 Iustin Pop
    return data
1813 a8083063 Iustin Pop
1814 a8083063 Iustin Pop
1815 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    Returns the device mapping produced by _AssembleInstanceDisks
    (a list of (node, instance-visible name, assemble result) tuples);
    raises OpExecError if any block device could not be assembled.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
1844 a8083063 Iustin Pop
1845 a8083063 Iustin Pop
1846 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the ConfigWriter used to resolve disk IDs per node
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    (disks_ok, device_info) where disks_ok is false if the operation
    failed, and device_info is a list of
    (host, instance_visible_name, assemble_result) tuples mapping node
    devices to instance devices (only for the primary node)
  """
  device_info = []
  disks_ok = True
  iname = instance.name
  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occured, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      # last arg False = assemble in secondary (non-primary) role
      result = rpc.call_blockdev_assemble(node, node_disk, iname, False)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=False, pass=1)" % (inst_disk.iv_name, node))
        if not ignore_secondaries:
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for inst_disk in instance.disks:
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      cfg.SetDiskID(node_disk, node)
      # last arg True = promote to primary role on the primary node
      result = rpc.call_blockdev_assemble(node, node_disk, iname, True)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=True, pass=2)" % (inst_disk.iv_name, node))
        disks_ok = False
    # NOTE(review): `result` here carries the value left over from the
    # last matching iteration of the loop above (the primary-node
    # assemble result); relies on the primary always appearing in
    # ComputeNodeTree — confirm
    device_info.append((instance.primary_node, inst_disk.iv_name, result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1906 a8083063 Iustin Pop
1907 a8083063 Iustin Pop
1908 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Assemble an instance's disks, raising OpExecError on failure.

  When `force` is true, assembly errors on secondary nodes are
  tolerated; on a fatal failure the disks are shut down again before
  the exception is raised.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # roll back whatever was brought up before reporting the error
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1920 fe7b0351 Michael Hanselmann
1921 fe7b0351 Michael Hanselmann
1922 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    This refuses to shut down the block devices while the instance
    itself is still reported as running on its primary node.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list answer means the node could not be contacted;
    # isinstance is the idiomatic type check (was: type(...) is list)
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1957 a8083063 Iustin Pop
1958 a8083063 Iustin Pop
1959 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors while shutting down devices on the
  primary node are ignored (only logged); shutdown errors on any other
  node always cause a False return value.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # a failure counts against the result unless it happened on the
        # primary node and ignore_primary was requested
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1978 a8083063 Iustin Pop
1979 a8083063 Iustin Pop
1980 d4f16fd9 Iustin Pop
def _CheckNodeFreeMemory(cfg, node, reason, requested):
  """Verify that a node can accommodate a memory request.

  Queries the node for its free memory and raises OpPrereqError when
  the node cannot be contacted, returns unusable data, or does not
  have at least `requested` MiB free.

  Args:
    - cfg: a ConfigWriter instance
    - node: the node name
    - reason: string to use in the error message
    - requested: the amount of memory in MiB

  """
  info = rpc.call_node_info([node], cfg.GetVGName())
  if not (info and isinstance(info, dict)):
    raise errors.OpPrereqError("Could not contact node %s for resource"
                               " information" % (node,))

  avail = info[node].get('memory_free')
  if not isinstance(avail, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, avail))
  if requested > avail:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, avail))
2008 d4f16fd9 Iustin Pop
2009 d4f16fd9 Iustin Pop
2010 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
2011 a8083063 Iustin Pop
  """Starts an instance.
2012 a8083063 Iustin Pop

2013 a8083063 Iustin Pop
  """
2014 a8083063 Iustin Pop
  HPATH = "instance-start"
2015 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2016 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
2017 a8083063 Iustin Pop
2018 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2019 a8083063 Iustin Pop
    """Build hooks env.
2020 a8083063 Iustin Pop

2021 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2022 a8083063 Iustin Pop

2023 a8083063 Iustin Pop
    """
2024 a8083063 Iustin Pop
    env = {
2025 a8083063 Iustin Pop
      "FORCE": self.op.force,
2026 a8083063 Iustin Pop
      }
2027 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2028 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2029 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2030 a8083063 Iustin Pop
    return env, nl, nl
2031 a8083063 Iustin Pop
2032 a8083063 Iustin Pop
  def CheckPrereq(self):
2033 a8083063 Iustin Pop
    """Check prerequisites.
2034 a8083063 Iustin Pop

2035 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2036 a8083063 Iustin Pop

2037 a8083063 Iustin Pop
    """
2038 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2039 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2040 a8083063 Iustin Pop
    if instance is None:
2041 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2042 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2043 a8083063 Iustin Pop
2044 a8083063 Iustin Pop
    # check bridges existance
2045 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2046 a8083063 Iustin Pop
2047 d4f16fd9 Iustin Pop
    _CheckNodeFreeMemory(self.cfg, instance.primary_node,
2048 d4f16fd9 Iustin Pop
                         "starting instance %s" % instance.name,
2049 d4f16fd9 Iustin Pop
                         instance.memory)
2050 d4f16fd9 Iustin Pop
2051 a8083063 Iustin Pop
    self.instance = instance
2052 a8083063 Iustin Pop
    self.op.instance_name = instance.name
2053 a8083063 Iustin Pop
2054 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2055 a8083063 Iustin Pop
    """Start the instance.
2056 a8083063 Iustin Pop

2057 a8083063 Iustin Pop
    """
2058 a8083063 Iustin Pop
    instance = self.instance
2059 a8083063 Iustin Pop
    force = self.op.force
2060 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
2061 a8083063 Iustin Pop
2062 a8083063 Iustin Pop
    node_current = instance.primary_node
2063 a8083063 Iustin Pop
2064 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
2065 a8083063 Iustin Pop
2066 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
2067 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2068 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
2069 a8083063 Iustin Pop
2070 a8083063 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
2071 a8083063 Iustin Pop
2072 a8083063 Iustin Pop
2073 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested reboot type is valid and that the
    instance is in the cluster.  Rejecting a bad reboot type here
    (instead of in Exec, as before) means we fail before any hooks
    have run, and with the conventional OpPrereqError.

    """
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.OpPrereqError("Invalid reboot type '%s', not one of"
                                 " [%s, %s, %s]" %
                                 (self.op.reboot_type,
                                  constants.INSTANCE_REBOOT_SOFT,
                                  constants.INSTANCE_REBOOT_HARD,
                                  constants.INSTANCE_REBOOT_FULL))
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the primary node; a full
    reboot shuts the instance down completely, recycles its block
    devices and starts it again.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    # extra_args is an optional opcode attribute
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # INSTANCE_REBOOT_FULL: stop the instance entirely, cycle the
      # block devices, then start it again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2147 bf6929a2 Alexander Schreiber
2148 bf6929a2 Alexander Schreiber
2149 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build the hook environment for an instance shutdown.

    The hooks run on the master node and on all nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Verify that the target instance exists in the configuration.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Stop the instance and deactivate its block devices.

    A shutdown failure on the primary node is only logged; the
    instance is marked down and its disks are shut down regardless.

    """
    instance = self.instance
    primary = instance.primary_node
    if not rpc.call_instance_shutdown(primary, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2192 a8083063 Iustin Pop
2193 a8083063 Iustin Pop
2194 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    # a diskless instance has no OS installation to redo
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node itself that the instance is not running
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # os_type is optional; when given, check the OS exists on the
    # primary node
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # bugfix: this used self.op.pnode, an attribute that does not
        # exist on this opcode and would raise AttributeError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally switches the instance's OS in the configuration, then
    re-runs the OS create scripts with the disks activated.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always release the disks, even when the OS scripts failed
      _ShutdownInstanceDisks(inst, self.cfg)
2271 fe7b0351 Michael Hanselmann
2272 fe7b0351 Michael Hanselmann
2273 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2274 decd5f45 Iustin Pop
  """Rename an instance.
2275 decd5f45 Iustin Pop

2276 decd5f45 Iustin Pop
  """
2277 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2278 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2279 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2280 decd5f45 Iustin Pop
2281 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2282 decd5f45 Iustin Pop
    """Build hooks env.
2283 decd5f45 Iustin Pop

2284 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2285 decd5f45 Iustin Pop

2286 decd5f45 Iustin Pop
    """
2287 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2288 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2289 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2290 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2291 decd5f45 Iustin Pop
    return env, nl, nl
2292 decd5f45 Iustin Pop
2293 decd5f45 Iustin Pop
  def CheckPrereq(self):
2294 decd5f45 Iustin Pop
    """Check prerequisites.
2295 decd5f45 Iustin Pop

2296 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2297 decd5f45 Iustin Pop

2298 decd5f45 Iustin Pop
    """
2299 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2300 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2301 decd5f45 Iustin Pop
    if instance is None:
2302 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2303 decd5f45 Iustin Pop
                                 self.op.instance_name)
2304 decd5f45 Iustin Pop
    if instance.status != "down":
2305 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2306 decd5f45 Iustin Pop
                                 self.op.instance_name)
2307 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2308 decd5f45 Iustin Pop
    if remote_info:
2309 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2310 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2311 decd5f45 Iustin Pop
                                  instance.primary_node))
2312 decd5f45 Iustin Pop
    self.instance = instance
2313 decd5f45 Iustin Pop
2314 decd5f45 Iustin Pop
    # new name verification
2315 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2316 decd5f45 Iustin Pop
2317 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2318 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2319 89e1fc26 Iustin Pop
      command = ["fping", "-q", name_info.ip]
2320 decd5f45 Iustin Pop
      result = utils.RunCmd(command)
2321 decd5f45 Iustin Pop
      if not result.failed:
2322 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2323 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2324 decd5f45 Iustin Pop
2325 decd5f45 Iustin Pop
2326 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2327 decd5f45 Iustin Pop
    """Reinstall the instance.
2328 decd5f45 Iustin Pop

2329 decd5f45 Iustin Pop
    """
2330 decd5f45 Iustin Pop
    inst = self.instance
2331 decd5f45 Iustin Pop
    old_name = inst.name
2332 decd5f45 Iustin Pop
2333 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2334 decd5f45 Iustin Pop
2335 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2336 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2337 decd5f45 Iustin Pop
2338 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2339 decd5f45 Iustin Pop
    try:
2340 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2341 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2342 f4bc1f2c Michael Hanselmann
        msg = ("Could run OS rename script for instance %s on node %s (but the"
2343 f4bc1f2c Michael Hanselmann
               " instance has been renamed in Ganeti)" %
2344 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2345 decd5f45 Iustin Pop
        logger.Error(msg)
2346 decd5f45 Iustin Pop
    finally:
2347 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2348 decd5f45 Iustin Pop
2349 decd5f45 Iustin Pop
2350 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks and finally drops it
    from the cluster configuration.  With ignore_failures set on the
    opcode, shutdown/disk-removal errors only produce warnings.

    """
    instance = self.instance
    # FIX: 'ignore_failures' is not listed in _OP_REQP, so opcodes may
    # arrive without it; default to False (strict behaviour) instead of
    # raising AttributeError (same pattern as ignore_ip in LURenameInstance)
    ignore_failures = getattr(self.op, "ignore_failures", False)
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2407 a8083063 Iustin Pop
2408 a8083063 Iustin Pop
2409 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["oper_state", "oper_ram", "status"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size", "vcpus"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedInstances(self, self.op.names)

  def _ComputeInstanceField(self, instance, field, live_data, bad_nodes):
    """Compute one output field for a single instance.

    live_data maps instance names to the runtime info reported by their
    primary node; bad_nodes lists the nodes which failed to answer.

    """
    if field == "name":
      return instance.name
    elif field == "os":
      return instance.os
    elif field == "pnode":
      return instance.primary_node
    elif field == "snodes":
      return list(instance.secondary_nodes)
    elif field == "admin_state":
      return (instance.status != "down")
    elif field == "oper_state":
      if instance.primary_node in bad_nodes:
        return None
      return bool(live_data.get(instance.name))
    elif field == "status":
      if instance.primary_node in bad_nodes:
        return "ERROR_nodedown"
      running = bool(live_data.get(instance.name))
      if running:
        if instance.status != "down":
          return "running"
        else:
          return "ERROR_up"
      else:
        if instance.status != "down":
          return "ERROR_down"
        else:
          return "ADMIN_down"
    elif field == "admin_ram":
      return instance.memory
    elif field == "oper_ram":
      if instance.primary_node in bad_nodes:
        return None
      elif instance.name in live_data:
        return live_data[instance.name].get("memory", "?")
      else:
        return "-"
    elif field == "disk_template":
      return instance.disk_template
    elif field == "ip":
      return instance.nics[0].ip
    elif field == "bridge":
      return instance.nics[0].bridge
    elif field == "mac":
      return instance.nics[0].mac
    elif field in ("sda_size", "sdb_size"):
      disk = instance.FindDisk(field[:3])
      if disk is None:
        return None
      return disk.size
    elif field == "vcpus":
      return instance.vcpus
    else:
      raise errors.ParameterError(field)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname)
                     for iname in instance_names]

    # data gathering: query the nodes only when a dynamic field was asked for
    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # the node failed to answer (an empty dict just means no
          # instance is alive there)
          bad_nodes.append(name)
    else:
      live_data = dict((name, {}) for name in instance_names)

    output = []
    for instance in instance_list:
      output.append([self._ComputeInstanceField(instance, field,
                                                live_data, bad_nodes)
                     for field in self.op.output_fields])

    return output
2524 a8083063 Iustin Pop
2525 a8083063 Iustin Pop
2526 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template supports failover, and that the target node can take it
    over (memory and bridges).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # only network-mirrored templates can fail over
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")
    target_node = secondary_nodes[0]

    # enough free memory must be available on the target node
    _CheckNodeFreeMemory(self.cfg, target_node, "failing over instance %s" %
                         instance.name, instance.memory)

    # every bridge used by the instance must exist on the target node
    bridges = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, bridges):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (bridges, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance
    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for disk in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, disk, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % disk.iv_name)

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))
    if not rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    # from here on, the instance lives on the target node
    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2638 a8083063 Iustin Pop
2639 a8083063 Iustin Pop
2640 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  On the primary, every device of the tree is always created.

  Returns True on success, False as soon as any creation fails.

  """
  # depth-first: the children must exist before the parent device
  for child in device.children or []:
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2659 a8083063 Iustin Pop
2660 a8083063 Iustin Pop
2661 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Recursively create a block device tree on a secondary node.

  A device which reports CreateOnSecondary() is created, and forces the
  creation of everything below it; otherwise the recursion simply
  passes the caller's 'force' flag down to the children.

  Returns True on success, False as soon as any creation fails.

  """
  if device.CreateOnSecondary():
    force = True
  # children first, so the parent can be assembled on top of them
  for child in device.children or []:
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # this device type lives only on the primary node
    return True
  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2688 a8083063 Iustin Pop
2689 a8083063 Iustin Pop
2690 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2691 923b1523 Iustin Pop
  """Generate a suitable LV name.
2692 923b1523 Iustin Pop

2693 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2694 923b1523 Iustin Pop

2695 923b1523 Iustin Pop
  """
2696 923b1523 Iustin Pop
  results = []
2697 923b1523 Iustin Pop
  for val in exts:
2698 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2699 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2700 923b1523 Iustin Pop
  return results
2701 923b1523 Iustin Pop
2702 923b1523 Iustin Pop
2703 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Build one drbd7 device, complete with its two backing LVs.

  names[0] is used for the data volume, names[1] for the metadata
  volume (fixed 128MB size).

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, port),
                      children=[lv_data, lv_meta])
2717 a8083063 Iustin Pop
2718 a8083063 Iustin Pop
2719 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Build one drbd8 device, complete with its two backing LVs.

  names[0] is used for the data volume, names[1] for the metadata
  volume (fixed 128MB size); iv_name is the visible instance name
  (e.g. "sda").

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[lv_data, lv_meta],
                      iv_name=iv_name)
2734 a1f445d3 Iustin Pop
2735 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of top-level disk objects (sda and sdb) matching
  the requested template.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == "diskless":
    disks = []
  elif template_name == "plain":
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    lv_names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, lv_names[0]),
                           iv_name="sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, lv_names[1]),
                           iv_name="sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    # two LVs per disk, mirrored locally via md raid1
    lv_names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                          ".sdb_m1", ".sdb_m2"])
    sda_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                          logical_id=(vgname, lv_names[0]))
    sda_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                          logical_id=(vgname, lv_names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              size=disk_sz,
                              children=[sda_m1, sda_m2])
    sdb_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                          logical_id=(vgname, lv_names[2]))
    sdb_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                          logical_id=(vgname, lv_names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              size=swap_sz,
                              children=[sdb_m1, sdb_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    lv_names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                          ".sdb_data", ".sdb_meta"])
    # md raid1 on top of a drbd7 branch, per disk
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, lv_names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children=[drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, lv_names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children=[drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    lv_names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                          ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                        disk_sz, lv_names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                        swap_sz, lv_names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2809 a8083063 Iustin Pop
2810 a8083063 Iustin Pop
2811 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2812 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2813 3ecf6786 Iustin Pop

2814 3ecf6786 Iustin Pop
  """
2815 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2816 a0c3fea1 Michael Hanselmann
2817 a0c3fea1 Michael Hanselmann
2818 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance; each disk is created
  on every secondary node first, then on the primary node.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
                (disk.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
2849 a8083063 Iustin Pop
2850 a8083063 Iustin Pop
2851 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`.  The removal is best-effort: if some devices
  cannot be removed, the others are still attempted (compare with
  `_CreateDisks()`), and the overall success is reflected in the
  return value.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal proces

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for dev in instance.disks:
    for node, disk in dev.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (dev.iv_name, node))
        all_removed = False

  return all_removed
2878 a8083063 Iustin Pop
2879 a8083063 Iustin Pop
2880 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  The mandatory opcode parameters are listed in _OP_REQP; in addition
  the code consumes optional opcode attributes when present (snode,
  ip, bridge, src_node, src_path, kernel_path, initrd_path,
  hvm_boot_order), defaulting them as needed.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check", "mac"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    # import mode exposes the source of the instance data to the hooks
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge, self.op.mac)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the creation mode (create vs. import and, for imports,
    the export data), the primary/secondary nodes, free disk space,
    the requested OS, the instance name/IP, MAC, bridge and HVM boot
    order.  Raises errors.OpPrereqError on any failed check.

    """
    # default the optional opcode attributes so later code can rely on them
    for attr in ["kernel_path", "initrd_path", "hvm_boot_order"]:
      if not hasattr(self.op, attr):
        setattr(self.op, attr, None)

    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: None,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    # Check lv size requirements
    if req_size is not None:
      nodenames = [pnode.name] + self.secondaries
      nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for node in nodenames:
        info = nodeinfo.get(node, None)
        if not info:
          # fixed: interpolate the node name, not the whole nodeinfo dict
          raise errors.OpPrereqError("Cannot get current information"
                                     " from node '%s'" % node)
        vg_free = info.get('vg_free', None)
        if not isinstance(vg_free, int):
          raise errors.OpPrereqError("Can't compute free disk space on"
                                     " node %s" % node)
        if req_size > info['vg_free']:
          raise errors.OpPrereqError("Not enough disk space on target node %s."
                                     " %d MB available, %d MB required" %
                                     (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    if self.op.kernel_path == constants.VALUE_NONE:
      raise errors.OpPrereqError("Can't set instance kernel to none")

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # resolve the requested IP: None/"none" -> no IP, "auto" -> resolved name
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means the address is already taken by another host
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # MAC address verification
    if self.op.mac != "auto":
      if not utils.IsValidMac(self.op.mac.lower()):
        raise errors.OpPrereqError("invalid MAC address specified: %s" %
                                   self.op.mac)

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    # boot order verification; only the characters [acdn] are allowed
    if self.op.hvm_boot_order is not None:
      if len(self.op.hvm_boot_order.strip("acdn")) != 0:
        raise errors.OpPrereqError("invalid boot order specified,"
                                   " must be one or more of [acdn]")

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Generates the NIC and disks, registers the instance in the cluster
    config, waits for (or spot-checks) disk sync, runs the OS
    create/import scripts and optionally starts the instance.  On disk
    creation or sync failure the already-created disks are removed and
    errors.OpExecError is raised.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    if self.op.mac == "auto":
      mac_address = self.cfg.GenerateMAC()
    else:
      mac_address = self.op.mac

    nic = objects.NIC(bridge=self.op.bridge, mac=mac_address)
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    # some hypervisor types need a network port allocated for the instance
    ht_kind = self.sstore.GetHypervisorType()
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            kernel_path=self.op.kernel_path,
                            initrd_path=self.op.initrd_path,
                            hvm_boot_order=self.op.hvm_boot_order,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back whatever was created before failing
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disk creation and the config entry
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                           src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3200 a8083063 Iustin Pop
3201 a8083063 Iustin Pop
3202 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = rpc.call_instance_list([node])[node]
    if node_insts is False:
      raise errors.OpExecError("Can't connect to node %s." % node)

    if instance.name not in node_insts:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name, node))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(instance)
    # build ssh cmdline: base options, then the shared option groups,
    # then the target node and the hypervisor's console command
    argv = ["ssh", "-q", "-t"]
    for opt_group in (ssh.KNOWN_HOSTS_OPTS, ssh.BATCH_MODE_OPTS):
      argv.extend(opt_group)
    argv.append(node)
    argv.append(console_cmd)
    return "ssh", argv
3250 a8083063 Iustin Pop
3251 a8083063 Iustin Pop
3252 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Only valid for remote_raid1 instances; the new component is created
  on both nodes, attached to the md array and the config is updated.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    node is known and not the primary, that the disk layout is
    remote_raid1, that the named device exists and does not already
    have two slaves.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the requested device by its iv_name (for/else: not found)
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave devices."
                                 " This would create a 3-disk raid1 which we"
                                 " don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component.

    Creates the new drbd device on the secondary and primary node (in
    that order, rolling back on failure), attaches it to the md array,
    updates the config and waits for sync.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # attach failed: tear the new device down on both nodes again
      # (fixed typo in the log message: "compoment" -> "component")
      logger.Error("Can't add mirror component to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3364 a8083063 Iustin Pop
3365 a8083063 Iustin Pop
3366 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  Detaches one drbd7 child (selected by disk_name and disk_id, the latter
  matched against the drbd port) from an md mirror and then deletes the
  underlying block devices on both of the child's nodes.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      # computed in CheckPrereq (the non-primary end of the child device)
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template is remote_raid1, and that the named disk has a drbd7 child
    with the given port (disk_id).  On success it stores the disk, the
    child and the child's non-primary node on self for Exec/hooks.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # for/else: after a normal break, 'disk' stays bound to the match;
    # the else branch fires only if no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # logical_id of a drbd7 child is (node_a, node_b, port, ...); Exec
    # iterates logical_id[:2] as the two node names, and [2] is the port
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # pick the child's node that is NOT the instance's primary node
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    First detaches the child from the md device on the primary node
    (hard failure), then best-effort removes the child's block devices
    on both of its nodes, and finally records the change in the config.

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # delete the device on both ends; failures are only logged so one
    # dead node does not block the removal
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)
3451 a8083063 Iustin Pop
3452 a8083063 Iustin Pop
3453 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3454 a8083063 Iustin Pop
  """Replace the disks of an instance.
3455 a8083063 Iustin Pop

3456 a8083063 Iustin Pop
  """
3457 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3458 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3459 a9e0c397 Iustin Pop
  _OP_REQP = ["instance_name", "mode", "disks"]
3460 a8083063 Iustin Pop
3461 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.instance
    # replacement-specific keys first, then the generic instance env
    # (the same update order as a literal dict followed by update())
    env = dict(MODE=self.op.mode,
               NEW_SECONDARY=self.op.remote_node,
               OLD_SECONDARY=instance.secondary_nodes[0])
    env.update(_BuildInstanceHookEnvByObject(instance))
    node_list = [self.sstore.GetMasterNode(), instance.primary_node]
    # only announce the new secondary when one was actually requested
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    return env, node_list, node_list
3480 a8083063 Iustin Pop
3481 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is network
    mirrored with exactly one secondary, and normalizes the requested
    replacement mode/remote node combination.  Side effects: sets
    self.instance, self.sec_node, self.remote_node_info,
    self.op.remote_node and, for drbd8, self.tgt_node/self.oth_node
    (and self.new_node in secondary-replace mode).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # canonicalize the (possibly abbreviated) name in the opcode
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # remote_node is optional on the opcode, hence getattr
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode for drbd7
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # tgt_node is where storage is replaced, oth_node is the peer
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # validate every requested disk name against the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # store the normalized (possibly cleared) remote node back
    self.op.remote_node = remote_node
3561 a8083063 Iustin Pop
3562 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance (remote_raid1 template).

    For every disk: generate a fresh drbd7 branch towards the (new or
    current) secondary, create it on secondary then primary, attach it
    to the md mirror, wait for sync, verify nothing is degraded, and
    finally detach and delete the old mirror component.

    """
    instance = self.instance
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      # remember (md device, old child, new child) for the later phases
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on secondary"
                                 " node %s. Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!"
                                 " Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        # attach failed: roll back the new device on both nodes
        # (fixed typo: was "compoment")
        logger.Error("Can't add mirror component to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of blockdev_find's result is the degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything synced and healthy: drop the old components
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3656 a8083063 Iustin Pop
3657 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node/oth_node were decided in CheckPrereq depending on mode
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    # every requested disk must be findable on both nodes
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # 128 is the (hard-coded) drbd meta LV size in MiB
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      # NOTE: old_lvs aliases the live children list of the device
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption than logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      # maps a disk to its (vg, name_replaced-<suffix>) rename target
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # record the swapped identities in the config objects
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # NOTE(review): the "%s" placeholder below is never filled in
            # (the second argument is the 'hint' keyword), so the warning
            # prints a literal "%s"; presumably the LV name was intended
            warning("Can't rollback device %s", hint="manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of blockdev_find's result is the degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # best-effort: a failed removal is only a warning
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
3826 a9e0c397 Iustin Pop
3827 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6
    # local aliases for the per-step progress/warning helpers
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps iv_name -> (disk object, LVs to delete in step 6)
    iv_names = {}
    # NOTE(review): vgname is never used below (step 1 re-reads the VG name
    # into my_vg); looks like a leftover assignment
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([pri_node, new_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    # both the primary and the future secondary must carry the cluster VG
    for node in pri_node, new_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      # only the disks selected in the opcode are checked here
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s on %s" % (dev.iv_name, pri_node))
      cfg.SetDiskID(dev, pri_node)
      if not rpc.call_blockdev_find(pri_node, dev):
        raise errors.OpExecError("Can't find device %s on node %s" %
                                 (dev.iv_name, pri_node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
      # ldisk=True: the local disk on the primary must itself be healthy,
      # since it will be the only good copy during the move
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
                                 " unsafe to replace the secondary" %
                                 pri_node)

    # Step: create new storage
    # NOTE(review): from this step on, the loops cover *all* instance disks,
    # not just those in self.op.disks as steps 1-2 did — confirm intended
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      # NOTE(review): size is unused in this loop body
      size = dev.size
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      # remember the disk and its old LVs for the final cleanup step
      iv_names[dev.iv_name] = (dev, dev.children)

    self.proc.LogStep(4, steps_total, "changing drbd configuration")
    for dev in instance.disks:
      # NOTE(review): size is unused in this loop body
      size = dev.size
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                      _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

    for dev in instance.disks:
      # we have new devices, shutdown the drbd on the old secondary
      info("shutting down drbd for %s on old node" % dev.iv_name)
      cfg.SetDiskID(dev, old_node)
      # best-effort: a failure here only leaves a stale device behind
      if not rpc.call_blockdev_shutdown(old_node, dev):
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                hint="Please cleanup this device manually as soon as possible")

    info("detaching primary drbds from the network (=> standalone)")
    done = 0
    for dev in instance.disks:
      cfg.SetDiskID(dev, pri_node)
      # set the physical (unique in bdev terms) id to None, meaning
      # detach from network
      dev.physical_id = (None,) * len(dev.physical_id)
      # and 'find' the device, which will 'fix' it to match the
      # standalone state
      if rpc.call_blockdev_find(pri_node, dev):
        done += 1
      else:
        warning("Failed to detach drbd %s from network, unusual case" %
                dev.iv_name)

    if not done:
      # no detaches succeeded (very unlikely)
      raise errors.OpExecError("Can't detach at least one DRBD from old node")

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    info("updating instance configuration")
    for dev in instance.disks:
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
      cfg.SetDiskID(dev, pri_node)
    # persist the new logical ids before attempting the re-attach
    cfg.Update(instance)

    # and now perform the drbd attach
    info("attaching primary drbds to new secondary (standalone => connected)")
    # NOTE(review): failures is never used afterwards; dead variable
    failures = []
    for dev in instance.disks:
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
      # since the attach is smart, it's enough to 'find' the device,
      # it will automatically activate the network, if the physical_id
      # is correct
      cfg.SetDiskID(dev, pri_node)
      # NOTE(review): the hint is passed positionally here, unlike the
      # hint= keyword used in the other warning() calls — confirm the
      # LogWarning signature treats the second positional as the hint
      if not rpc.call_blockdev_find(pri_node, dev):
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
                "please do a gnt-instance info to see the status of disks")

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      # presumably field 5 of the find result is the degraded flag — the
      # variable name suggests so; verify against call_blockdev_find
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        # best-effort cleanup; leftover LVs are only reported, not fatal
        if not rpc.call_blockdev_remove(old_node, lv):
          warning("Can't remove LV on old secondary",
                  hint="Cleanup stale volumes by hand")
3990 a9e0c397 Iustin Pop
3991 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler,
    based on the instance's disk template and on whether a new
    secondary node was requested.

    """
    template = self.instance.disk_template
    if template == constants.DT_REMOTE_RAID1:
      return self._ExecRR1(feedback_fn)
    if template == constants.DT_DRBD8:
      # no remote node means an in-place replacement of the disks only
      if self.op.remote_node is None:
        return self._ExecD8DiskOnly(feedback_fn)
      return self._ExecD8Secondary(feedback_fn)
    raise errors.ProgrammerError("Unhandled disk replacement case")
4008 a9e0c397 Iustin Pop
4009 a8083063 Iustin Pop
4010 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if not self.op.instances:
      # no explicit list given: query every instance in the cluster
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
      return
    wanted = []
    for name in self.op.instances:
      instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)
    self.wanted_instances = wanted

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Recursively builds a dict describing the device on the primary and
    (if any) secondary node, including the status of its children.

    """
    pnode = instance.primary_node
    self.cfg.SetDiskID(dev, pnode)
    dev_pstatus = rpc.call_blockdev_find(pnode, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == pnode:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_sstatus = None
    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)

    dev_children = []
    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      # the remote state tells us whether the instance is actually running
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      # the configured state, as opposed to the observed one
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      result[instance.name] = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": [self._ComputeDiskStatus(instance, None, device)
                  for device in instance.disks],
        "network_port": instance.network_port,
        "vcpus": instance.vcpus,
        "kernel_path": instance.kernel_path,
        "initrd_path": instance.initrd_path,
        "hvm_boot_order": instance.hvm_boot_order,
        }

    return result
4113 a8083063 Iustin Pop
4114 a8083063 Iustin Pop
4115 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # only the parameters actually being changed are passed as overrides
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    # a NIC override is built if any NIC field changes; unchanged fields
    # are filled in from the instance's first NIC
    if self.do_ip or self.do_bridge or self.mac:
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      if self.mac:
        mac = self.mac
      else:
        mac = self.instance.nics[0].mac
      args['nics'] = [(ip, bridge, mac)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # all parameters are optional on the opcode; missing ones become None
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.mac = getattr(self.op, "mac", None)
    self.bridge = getattr(self.op, "bridge", None)
    self.kernel_path = getattr(self.op, "kernel_path", None)
    self.initrd_path = getattr(self.op, "initrd_path", None)
    self.hvm_boot_order = getattr(self.op, "hvm_boot_order", None)
    all_parms = [self.mem, self.vcpus, self.ip, self.bridge, self.mac,
                 self.kernel_path, self.initrd_path, self.hvm_boot_order]
    # at least one parameter must be given
    if all_parms.count(None) == len(all_parms):
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" (any case) clears the IP; since None is
      # then a valid target value, the separate do_ip flag is needed
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)
    if self.mac is not None:
      # NOTE(review): the in-use check runs before the validity check, so
      # an invalid MAC that happens to be "in use" reports the wrong error
      if self.cfg.IsMacInUse(self.mac):
        raise errors.OpPrereqError('MAC address %s already in use in cluster' %
                                   self.mac)
      if not utils.IsValidMac(self.mac):
        raise errors.OpPrereqError('Invalid MAC address %s' % self.mac)

    if self.kernel_path is not None:
      self.do_kernel_path = True
      # removing the kernel entirely is not supported via this LU
      if self.kernel_path == constants.VALUE_NONE:
        raise errors.OpPrereqError("Can't set instance to no kernel")

      if self.kernel_path != constants.VALUE_DEFAULT:
        if not os.path.isabs(self.kernel_path):
          raise errors.OpPrereqError("The kernel path must be an absolute"
                                    " filename")
    else:
      self.do_kernel_path = False

    if self.initrd_path is not None:
      self.do_initrd_path = True
      # unlike the kernel, the initrd may be removed (VALUE_NONE allowed)
      if self.initrd_path not in (constants.VALUE_NONE,
                                  constants.VALUE_DEFAULT):
        if not os.path.isabs(self.initrd_path):
          raise errors.OpPrereqError("The initrd path must be an absolute"
                                    " filename")
    else:
      self.do_initrd_path = False

    # boot order verification
    if self.hvm_boot_order is not None:
      if self.hvm_boot_order != constants.VALUE_DEFAULT:
        # stripping all valid characters must leave nothing behind
        if len(self.hvm_boot_order.strip("acdn")) != 0:
          raise errors.OpPrereqError("invalid boot order specified,"
                                     " must be one or more of [acdn]"
                                     " or 'default'")

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" %
                                 self.op.instance_name)
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # result collects (parameter, new value) pairs for reporting
    result = []
    instance = self.instance
    # NOTE(review): truthiness checks mean falsy values (e.g. 0) are
    # silently ignored here, unlike the "is not None" tests in CheckPrereq
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    # do_ip is used (not truthiness) since None is a valid new IP value
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))
    if self.mac:
      instance.nics[0].mac = self.mac
      result.append(("mac", self.mac))
    if self.do_kernel_path:
      instance.kernel_path = self.kernel_path
      result.append(("kernel_path", self.kernel_path))
    if self.do_initrd_path:
      instance.initrd_path = self.initrd_path
      result.append(("initrd_path", self.initrd_path))
    if self.hvm_boot_order:
      # "default" maps to None in the configuration
      if self.hvm_boot_order == constants.VALUE_DEFAULT:
        instance.hvm_boot_order = None
      else:
        instance.hvm_boot_order = self.hvm_boot_order
      result.append(("hvm_boot_order", self.hvm_boot_order))

    # persist the modified instance object
    self.cfg.AddInstance(instance)

    return result
4275 a8083063 Iustin Pop
4276 a8083063 Iustin Pop
4277 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # an absent or empty node list means "all nodes"
    node_arg = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, node_arg)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4299 a8083063 Iustin Pop
4300 a8083063 Iustin Pop
4301 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one and that the
    target node exists; both names are canonicalized on self.op.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Snapshots the instance's first disk, copies the snapshot to the
    target node, and removes older exports of the same instance from
    other nodes.  Most failures are logged and tolerated rather than
    aborting the export.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      # NOTE(review): only the disk named "sda" is snapshotted here; any
      # other disks of the instance are not part of the export
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # best-effort: a failed snapshot is logged, not raised
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if the snapshot phase failed, but only
      # if we were the ones who shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    # copy each snapshot to the target node, then drop the local snapshot
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        # drop stale exports of this instance on every other node
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4413 5c947f38 Iustin Pop
4414 5c947f38 Iustin Pop
4415 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves the operation's target object (cluster, node or instance)
    from self.op.kind/self.op.name and stores it in self.target; node
    and instance names are canonicalized back into self.op.name.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      # the cluster has a single, well-known config object
      self.target = self.cfg.GetClusterInfo()
      return
    if kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      # unknown tag kind: refuse the operation outright
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4444 5c947f38 Iustin Pop
4445 5c947f38 Iustin Pop
4446 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  The target object is resolved by TagsLU.CheckPrereq from the
  "kind"/"name" opcode parameters.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Returns the tag list.

    """
    # self.target was set up by the inherited CheckPrereq
    tags = self.target.GetTags()
    return tags
4457 5c947f38 Iustin Pop
4458 5c947f38 Iustin Pop
4459 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4460 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4461 73415719 Iustin Pop

4462 73415719 Iustin Pop
  """
4463 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4464 73415719 Iustin Pop
4465 73415719 Iustin Pop
  def CheckPrereq(self):
4466 73415719 Iustin Pop
    """Check prerequisites.
4467 73415719 Iustin Pop

4468 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4469 73415719 Iustin Pop

4470 73415719 Iustin Pop
    """
4471 73415719 Iustin Pop
    try:
4472 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4473 73415719 Iustin Pop
    except re.error, err:
4474 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4475 73415719 Iustin Pop
                                 (self.op.pattern, err))
4476 73415719 Iustin Pop
4477 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4478 73415719 Iustin Pop
    """Returns the tag list.
4479 73415719 Iustin Pop

4480 73415719 Iustin Pop
    """
4481 73415719 Iustin Pop
    cfg = self.cfg
4482 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4483 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4484 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4485 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4486 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4487 73415719 Iustin Pop
    results = []
4488 73415719 Iustin Pop
    for path, target in tgts:
4489 73415719 Iustin Pop
      for tag in target.GetTags():
4490 73415719 Iustin Pop
        if self.re.search(tag):
4491 73415719 Iustin Pop
          results.append((path, tag))
4492 73415719 Iustin Pop
    return results
4493 73415719 Iustin Pop
4494 73415719 Iustin Pop
4495 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4496 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4497 5c947f38 Iustin Pop

4498 5c947f38 Iustin Pop
  """
4499 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4500 5c947f38 Iustin Pop
4501 5c947f38 Iustin Pop
  def CheckPrereq(self):
4502 5c947f38 Iustin Pop
    """Check prerequisites.
4503 5c947f38 Iustin Pop

4504 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4505 5c947f38 Iustin Pop

4506 5c947f38 Iustin Pop
    """
4507 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4508 f27302fa Iustin Pop
    for tag in self.op.tags:
4509 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4510 5c947f38 Iustin Pop
4511 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4512 5c947f38 Iustin Pop
    """Sets the tag.
4513 5c947f38 Iustin Pop

4514 5c947f38 Iustin Pop
    """
4515 5c947f38 Iustin Pop
    try:
4516 f27302fa Iustin Pop
      for tag in self.op.tags:
4517 f27302fa Iustin Pop
        self.target.AddTag(tag)
4518 5c947f38 Iustin Pop
    except errors.TagError, err:
4519 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4520 5c947f38 Iustin Pop
    try:
4521 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4522 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4523 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4524 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4525 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4526 5c947f38 Iustin Pop
4527 5c947f38 Iustin Pop
4528 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for old_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(old_tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every requested tag must currently be present on the target
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    Raises OpRetryError when the config was concurrently modified.

    """
    for old_tag in self.op.tags:
      self.target.RemoveTag(old_tag)
    # persist the modified target back into the cluster config
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")
4564 06009e27 Iustin Pop
4565 06009e27 Iustin Pop
class LUTestDelay(NoHooksLU):
  """Sleep for a specified amount of time.

  This LU sleeps on the master and/or nodes for a specified amount of
  time.

  """
  _OP_REQP = ["duration", "on_master", "on_nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have a good list of nodes and/or the duration
    is valid.

    """
    if self.op.on_nodes:
      # canonicalize/validate the node list before Exec uses it
      self.op.on_nodes = _GetWantedNodes(self, self.op.on_nodes)

  def Exec(self, feedback_fn):
    """Do the actual sleep.

    Raises OpExecError if the delay fails on the master or on any node.

    """
    if self.op.on_master and not utils.TestDelay(self.op.duration):
      raise errors.OpExecError("Error during master delay test")
    if self.op.on_nodes:
      result = rpc.call_test_delay(self.op.on_nodes, self.op.duration)
      if not result:
        # no node answered at all
        raise errors.OpExecError("Complete failure from rpc call")
      for node, node_result in result.items():
        if not node_result:
          raise errors.OpExecError("Failure during rpc call to node %s,"
                                   " result: %s" % (node, node_result))