Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 6e06b36c

History | View | Annotate | Download (144 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 a8083063 Iustin Pop
class LogicalUnit(object):
  """Base class for all Logical Units.

  A Logical Unit (LU) implements one master-side operation. Concrete
  subclasses must:
    - implement CheckPrereq, which also canonicalizes the opcode
      instance fields (filling in all of them, even if only with None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine the run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None          # hooks path; None means no hooks for this LU
  HTYPE = None          # hooks type for this LU
  _OP_REQP = []         # opcode attributes that must be present, non-None
  REQ_CLUSTER = True    # an initialized cluster is required
  REQ_MASTER = True     # must be run on the master node

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    Stores the processor/opcode/config/sstore references, verifies
    that every attribute named in _OP_REQP is set on the opcode, and
    enforces the REQ_CLUSTER/REQ_MASTER run requirements.

    This needs to be overriden in derived classes in order to check op
    validity.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    # Required opcode parameters: anything missing or None is an error.
    for attr_name in self._OP_REQP:
      if getattr(op, attr_name, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if not self.REQ_CLUSTER:
      return
    if not cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      if utils.HostInfo().name != master:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    Subclasses verify here that the operation can run: inter-node
    communication is allowed, but the check must be idempotent — no
    cluster or system state may be changed.

    On failure, errors.OpPrereqError must be raised; the return value
    is ignored.

    Implementations must also canonicalize the opcode parameters
    (e.g. fully expand short node names) so that hooks, logging, etc.
    see consistent values afterwards.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This is where the actual work happens. Expected failures that the
    code (partially) deals with should be signalled by raising
    errors.OpExecError.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Must return a three-element tuple: the environment dict used when
    running this LU's hook, the list of node names on which the hook
    runs before execution, and the list of node names on which it runs
    after execution.

    Keys must not carry the 'GANETI_' prefix (the hooks runner adds
    it, along with further keys of its own). An LU with no environment
    returns an empty dict, never None; likewise empty node lists are
    [] and never None. The master node must not be listed — the hooks
    runner adds it itself whenever the LU requires a cluster.

    Not called at all when the LU's HPATH is None.

    """
    raise NotImplementedError
145 a8083063 Iustin Pop
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  Common parent for LogicalUnits that do not want any hook
  processing; it exists only so the empty-hooks boilerplate is not
  repeated in every such subclass.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    No hooks are run for this LU, so return an empty environment and
    empty pre/post node lists.

    """
    hooks_env = {}
    return hooks_env, [], []
164 a8083063 Iustin Pop
165 a8083063 Iustin Pop
166 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
  """Wrapper around utils.SetEtcHostsEntry.

  Resolves the given host name and records its IP together with the
  full and short names in the cluster's /etc/hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  short_names = [host_info.ShortName()]
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, host_info.ip, host_info.name,
                         short_names)
172 9440aeab Michael Hanselmann
173 9440aeab Michael Hanselmann
174 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  Drops both the fully-qualified and the short name of the given host
  from the cluster's /etc/hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  for entry in (host_info.name, host_info.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, entry)
181 c8a0948f Michael Hanselmann
182 c8a0948f Michael Hanselmann
183 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the LogicalUnit on whose behalf we expand (only lu.cfg is used)
    nodes: List of nodes (strings) or None for all

  Returns:
    A NiceSort-ed list of fully-expanded node names.

  Raises:
    errors.OpPrereqError: if nodes is neither a list nor None, or if
      one of the given names does not exist in the configuration.

  """
  # Fix: the docstring advertises None as "all nodes", but the old
  # type check rejected None outright; accept it explicitly.
  if nodes is not None and not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      # ExpandNodeName canonicalizes short names; None means unknown.
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    # Empty list or None: operate on every configured node.
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)
205 3312b702 Iustin Pop
206 3312b702 Iustin Pop
207 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    lu: the LogicalUnit on whose behalf we expand (only lu.cfg is used)
    instances: List of instances (strings) or None for all

  Returns:
    A NiceSort-ed list of fully-expanded instance names.

  Raises:
    errors.OpPrereqError: if instances is neither a list nor None, or
      if one of the given names does not exist in the configuration.

  """
  # Fix: the docstring advertises None as "all instances", but the old
  # type check rejected None outright; accept it explicitly.
  if instances is not None and not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      # ExpandInstanceName canonicalizes short names; None means unknown.
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    # Empty list or None: operate on every configured instance.
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)
229 dcb93971 Michael Hanselmann
230 dcb93971 Michael Hanselmann
231 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
232 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
233 83120a01 Michael Hanselmann

234 83120a01 Michael Hanselmann
  Args:
235 83120a01 Michael Hanselmann
    static: Static fields
236 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
237 83120a01 Michael Hanselmann

238 83120a01 Michael Hanselmann
  """
239 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
240 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
241 dcb93971 Michael Hanselmann
242 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
243 dcb93971 Michael Hanselmann
244 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
245 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
246 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
247 3ecf6786 Iustin Pop
                                          difference(all_fields)))
248 dcb93971 Michael Hanselmann
249 dcb93971 Michael Hanselmann
250 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
251 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
252 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
253 ecb215b5 Michael Hanselmann

254 ecb215b5 Michael Hanselmann
  Args:
255 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
256 396e1b78 Michael Hanselmann
  """
257 396e1b78 Michael Hanselmann
  env = {
258 0e137c28 Iustin Pop
    "OP_TARGET": name,
259 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
260 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
261 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
262 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
263 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
264 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
265 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
266 396e1b78 Michael Hanselmann
  }
267 396e1b78 Michael Hanselmann
268 396e1b78 Michael Hanselmann
  if nics:
269 396e1b78 Michael Hanselmann
    nic_count = len(nics)
270 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
271 396e1b78 Michael Hanselmann
      if ip is None:
272 396e1b78 Michael Hanselmann
        ip = ""
273 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
274 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
275 396e1b78 Michael Hanselmann
  else:
276 396e1b78 Michael Hanselmann
    nic_count = 0
277 396e1b78 Michael Hanselmann
278 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
279 396e1b78 Michael Hanselmann
280 396e1b78 Michael Hanselmann
  return env
281 396e1b78 Michael Hanselmann
282 396e1b78 Michael Hanselmann
283 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    The environment dict produced by _BuildInstanceHookEnv, with any
    override values applied on top of the instance's own attributes.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Fix: this was 'instance.os' (copy-paste from the line above),
    # which made INSTANCE_STATUS report the OS name instead of the
    # instance's run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
303 396e1b78 Michael Hanselmann
304 396e1b78 Michael Hanselmann
305 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Rewrites (or appends to) the cluster-wide known_hosts file so that
  exactly one up-to-date 'fullnode,ip ssh-rsa pubkey' entry exists:
  matching-but-stale entries are dropped, a correct entry is kept, and
  a missing entry is added.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open read-write; 'w+' would truncate, so it is only used when the
  # file does not exist yet.
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False       # a correct entry is already present

  save_lines = []       # lines to keep in the rewritten file
  add_lines = []        # new lines to append
  removed = False       # at least one stale entry was dropped

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    # Only lines with at least "hostspec type key" and not starting
    # with '#' are candidate known_hosts entries.
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      fields = parts[0].split(',')
      key = parts[2]

      # haveall: the entry names both our ip and fqdn;
      # havesome: it names at least one of them.
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # Fully correct entry: keep it and remember we have one.
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # Partially-matching or wrong-key entry: drop it.
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    # Unrelated line (comment, other host, etc.): keep verbatim.
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # Atomic replace: write a temp file in the same directory, then
    # rename over the original.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
382 a8083063 Iustin Pop
383 a8083063 Iustin Pop
384 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
385 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
386 a8083063 Iustin Pop

387 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
388 a8083063 Iustin Pop
  is the error message.
389 a8083063 Iustin Pop

390 a8083063 Iustin Pop
  """
391 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
392 a8083063 Iustin Pop
  if vgsize is None:
393 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
394 a8083063 Iustin Pop
  elif vgsize < 20480:
395 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
396 191a8385 Guido Trotter
            (vgname, vgsize))
397 a8083063 Iustin Pop
  return None
398 a8083063 Iustin Pop
399 a8083063 Iustin Pop
400 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.


  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  Raises:
    errors.OpExecError: if the ssh-keygen invocation fails

  """
  # Paths for the run-as user's private key, public key and
  # authorized_keys file.
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Back up any existing key files before removing them, so a previous
  # identity is not lost silently.
  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  # Generate a fresh passphrase-less DSA keypair ("-N" "" = no
  # passphrase, "-q" = quiet).
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  # Authorize the new public key for logins to this same account.
  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()
430 a8083063 Iustin Pop
431 a8083063 Iustin Pop
432 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: ssconf store used to persist the generated node password

  Raises:
    errors.OpExecError: if certificate generation or the node daemon
      restart fails

  """
  # Create pseudo random password
  # NOTE(review): the 'sha' module is deprecated in later Python
  # versions (hashlib replaces it); kept as-is for the Python version
  # this code targets.
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Self-signed certificate valid for ~5 years; the same file receives
  # both the key (-keyout) and the certificate (-out).
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # Restrict the combined key/cert file to owner read-only.
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # Restart the node daemon so it picks up the new password and cert.
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
461 a8083063 Iustin Pop
462 a8083063 Iustin Pop
463 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Verify that all bridges needed by an instance exist.

  Queries the instance's primary node for the bridges used by the
  instance's NICs and raises errors.OpPrereqError if any is missing.

  """
  # check bridges existance
  brlist = [nic.bridge for nic in instance.nics]
  if rpc.call_bridges_exist(instance.primary_node, brlist):
    return
  raise errors.OpPrereqError("one or more target bridges %s does not"
                             " exist on destination node '%s'" %
                             (brlist, instance.primary_node))
473 bf6929a2 Alexander Schreiber
474 bf6929a2 Alexander Schreiber
475 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Runs without an existing cluster (REQ_CLUSTER = False) and performs
  the one-time setup: sstore keys, node daemon certificate/password,
  master IP, ssh keys, /etc/hosts and the initial configuration file.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Also validates our own IP resolution, the optional secondary IP,
    the volume group, the MAC prefix, the hypervisor type, the master
    network device and the node init.d script.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    # Resolve our own host; stored for use by Exec/BuildHooksEnv.
    self.hostname = hostname = utils.HostInfo()

    # A loopback resolution would make the master unreachable for the
    # other nodes.
    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # Verify that the resolved IP actually belongs to this host by
    # connecting to it from localhost.
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    # secondary_ip is optional; when given it must be a valid IP of
    # this host (unless it equals the primary IP).
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                 "but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    # The MAC prefix must be three lowercase hex octets.
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    # The master netdev must exist on this machine.
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    # The node daemon init script must exist and be executable.
    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not "
                                 "executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Performs the actual one-time setup using the values validated and
    stored by CheckPrereq.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # Host key files are "type key [comment]"; keep only the key part.
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
596 a8083063 Iustin Pop
597 a8083063 Iustin Pop
598 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  Refuses to run unless the master node is the only remaining node and
  no instances are defined.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    node_list = self.cfg.GetNodeList()
    # only the master node itself may remain in the configuration
    remaining = len(node_list) - 1
    if remaining != 0 or node_list[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % remaining)
    instance_list = self.cfg.GetInstanceList()
    if instance_list:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instance_list))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master = self.sstore.GetMasterNode()
    # back up the root ssh key pair before asking the master to leave
    # the cluster (which removes the cluster-managed keys)
    user_files = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in user_files[:2]:
      utils.CreateBackup(key_file)
    rpc.call_node_leave_cluster(master)
632 a8083063 Iustin Pop
633 a8083063 Iustin Pop
634 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  Runs a series of per-node and per-instance checks and reports every
  problem through the feedback function; Exec returns 1 if anything was
  found, 0 otherwise.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data for this node (from rpc.call_vg_list)
      node_result: the per-node result of rpc.call_node_verify
      remote_version: protocol version reported by the node (false-y if
        the node could not be contacted)
      feedback_fn: callable used to report each error found

    Returns:
      True if any problem was found on this node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # NOTE(review): this message has one leading space while every
      # other error uses two - looks like a cosmetic inconsistency
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      # _HasValidVG returns an error description, or None if the VG is ok
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      # a non-empty 'nodelist' maps unreachable peers to error messages
      if node_result['nodelist']:
        bad = True
        # NOTE(review): this loop rebinds the 'node' parameter; harmless
        # today since 'node' is not used after the loop, but fragile
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    # a non-None hypervisor result is an error description
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Args:
      instance: name of the instance to check
      node_vol_is: per-node dict of the volumes actually present
      node_instance: per-node dict of the instances actually running
      feedback_fn: callable used to report each error found

    Returns:
      True if any problem was found, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    # every LV the configuration expects must actually exist on its node
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # an instance not marked 'down' must be running on its primary node
    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # ...and must not be running on any other node
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any unexpected volume was found, False otherwise.

    """
    # NOTE(review): the code below reports *every* volume not mapped by
    # an instance; the .os/.swap/backup exemption mentioned above is not
    # implemented here - confirm which is intended
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any unknown instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns 0 if all checks passed, 1 if any problem was found.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all remote data up front, one multi-node rpc per data kind
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      # a non-dict result means the rpc to this node failed
      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      # accumulate the should-exist volumes of every instance for the
      # orphan-volume check below
      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
875 a8083063 Iustin Pop
876 a8083063 Iustin Pop
877 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run on the master node only, both pre and post.

    """
    env = {
      # read the current name from the LU's sstore (as done elsewhere in
      # this class); the previous 'self.op.sstore' accessed the opcode,
      # which does not carry the simple store
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, rejects a no-op rename, and makes sure the
    new master IP is not already live on the network.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a reachable new IP means something else already uses it
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master role, rewrites the sstore keys, pushes the updated
    ssconf files to all other nodes and restarts the master role.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # a failed copy is only logged; the rename still proceeds
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to bring the master role back up, even if the
      # distribution above failed
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,\n"
                     "please restart manually.")
954 07bd8a51 Iustin Pop
955 07bd8a51 Iustin Pop
956 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance object whose disks are polled
    proc: processor-like object providing LogInfo/LogWarning
    oneshot: if True, report the current status once instead of polling
      until the disks are in sync
    unlock: if True, release the 'cmd' lock while sleeping between polls

  Returns:
    True if the disks finished in a non-degraded state, False otherwise.

  Raises:
    errors.RemoteError: if mirror data could not be fetched from the
      primary node ten times in a row.

  """
  if not instance.disks:
    # nothing to wait for
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      # rpc failure: retry up to 10 times with a 6 second pause
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    # a successful call resets the consecutive-failure counter
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # mstat is (perc_done, est_time, is_degraded, ldisk);
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      # degraded with no sync in progress counts as a final failure
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means this device is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep before the next poll, dropping the 'cmd' lock if requested
    # so other commands can run while we wait
    if unlock:
      utils.Unlock('cmd')
    try:
      # never sleep longer than a minute, whatever the estimate says
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1018 a8083063 Iustin Pop
1019 a8083063 Iustin Pop
1020 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
  """Check that mirrors are not degraded.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  Returns True when the device (and, recursively, its children) look
  consistent, False otherwise.

  """
  cfgw.SetDiskID(dev, node)
  # index into the blockdev_find result: 5 is is_degraded, 6 is ldisk
  idx = 5
  if ldisk:
    idx = 6

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if rstats:
      result = not rstats[idx]
    else:
      logger.ToStderr("Can't get any data from node %s" % node)
      result = False

  # recurse into the children; the short-circuiting 'and' means no
  # further children are queried once a failure has been seen.
  # NOTE(review): children are checked with the default ldisk=False
  # regardless of our own ldisk argument - confirm this is intended
  for child in (dev.children or []):
    result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return result
1047 a8083063 Iustin Pop
1048 a8083063 Iustin Pop
1049 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Asks every node for its OS diagnosis and returns the combined data.

    """
    nodes = self.cfg.GetNodeList()
    # a False result marks a wholesale rpc failure
    diagnose_data = rpc.call_os_diagnose(nodes)
    if diagnose_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return diagnose_data
1072 a8083063 Iustin Pop
1073 a8083063 Iustin Pop
1074 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    # run the hooks everywhere except on the node being removed
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # normalized from the old-style "raise Exc, (args)" form for
      # consistency with the other raises in this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # the node must not be used by any instance, in either role
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    # tell the node to clean up its cluster-related state...
    rpc.call_node_leave_cluster(node.name)

    # ...and stop the node daemon on it
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
1147 c8a0948f Michael Hanselmann
1148 a8083063 Iustin Pop
1149 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields which require a live (rpc-based) query on the nodes
    self.dynamic_fields = frozenset("dtotal dfree mtotal mnode mfree"
                                    " bootid".split())

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    # query the nodes only if at least one dynamic field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if not nodeinfo:
          live_data[name] = {}
        else:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    # walk the instance list only if an instance-related field was asked for
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1245 a8083063 Iustin Pop
1246 a8083063 Iustin Pop
1247 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of volumes on the wanted nodes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    # build the instance->LVs mapping so we can attribute volumes
    ilist = [self.cfg.GetInstanceInfo(iname)
             for iname in self.cfg.GetInstanceList()]
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # skip nodes which returned no (or empty) volume data
      if not volumes.get(node, None):
        continue

      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node, if any
            val = '-'
            for inst in ilist:
              if (node in lv_by_node[inst] and
                  vol['name'] in lv_by_node[inst][node]):
                val = inst.name
                break
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1315 dcb93971 Michael Hanselmann
1316 dcb93971 Michael Hanselmann
1317 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current nodes only, post-hooks also on the new node
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; raises on failure, so from here on the name is valid
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: secondary ip defaults to the primary one
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # make sure neither of the new node's addresses is already in use
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError(
          "Node secondary ip not reachable by TCP based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is interpolated into a shell command below, so refuse
    # anything outside this conservative character set
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # give the freshly restarted daemon a moment before querying it
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host DSA/RSA key pairs plus the cluster user's ssh key pair
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _AddHostToEtcHosts(new_node.name)

    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      # verify the node really listens on the secondary ip it advertised
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the"
                                 " secondary ip you gave (%s).\n"
                                 "Please fix and re-run this command." %
                                 new_node.secondary_ip)

    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s.\n"
                               "Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # copy the simple store files (ssconf) to the new node as well
    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    # only now, with the node fully set up, add it to the configuration
    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1536 a8083063 Iustin Pop
1537 a8083063 Iustin Pop
1538 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {}
    env["OP_TARGET"] = self.new_master
    env["NEW_MASTER"] = self.new_master
    env["OLD_MASTER"] = self.old_master
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.new_master == self.old_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be.\n"
                                 "%s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    new_master = self.new_master
    old_master = self.old_master

    logger.Info("setting master to %s, old master: %s" %
                (new_master, old_master))

    # demote the old master first; a failure here is logged but not fatal
    if not rpc.call_node_stop_master(old_master):
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % old_master)

    # record the new master in the simple store and push it out
    sstore = self.sstore
    sstore.SetKey(sstore.SS_MASTER_NODE, new_master)
    master_file = sstore.KeyToFilename(sstore.SS_MASTER_NODE)
    if not rpc.call_upload_file(self.cfg.GetNodeList(), master_file):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")
1606 a8083063 Iustin Pop
1607 a8083063 Iustin Pop
1608 a8083063 Iustin Pop
1609 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  # this LU can also run on non-master nodes
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    return {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      }
1638 a8083063 Iustin Pop
1639 a8083063 Iustin Pop
1640 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Args:
      opts - class with options as members
      args - list containing a single element, the file name
    Opts used:
      nodes - list containing the name of target nodes; if empty, all nodes

    """
    filename = self.op.filename
    myname = utils.HostInfo().name

    # copy to every wanted node except ourselves
    for node in [name for name in self.nodes if name != myname]:
      if not ssh.CopyFileToNode(node, filename):
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
1677 a8083063 Iustin Pop
1678 a8083063 Iustin Pop
1679 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return a dump of the cluster configuration.

    """
    return self.cfg.DumpConfig()
1696 a8083063 Iustin Pop
1697 a8083063 Iustin Pop
1698 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    Returns a list of (node, output, exit_code) tuples, one per node.

    """
    results = []
    for target in self.nodes:
      ssh_result = ssh.SSHCall(target, "root", self.op.command)
      results.append((target, ssh_result.output, ssh_result.exit_code))

    return results
1722 a8083063 Iustin Pop
1723 a8083063 Iustin Pop
1724 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Activate the disks.

    Returns the device mapping computed by _AssembleInstanceDisks.

    """
    assembled, disk_mapping = _AssembleInstanceDisks(self.instance, self.cfg)
    if not assembled:
      raise errors.OpExecError("Cannot activate block devices")

    return disk_mapping
1753 a8083063 Iustin Pop
1754 a8083063 Iustin Pop
1755 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the configuration object, used to set the disk IDs per node
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info); disks_ok is false if any
    non-ignored device failed to assemble, and device_info is a list of
    (primary_node, iv_name, primary_assemble_result) tuples, one per
    instance disk

  """
  device_info = []
  disks_ok = True
  for inst_disk in instance.disks:
    # result of the assemble call on the primary node, kept for reporting
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk,
                                          instance.name, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s (is_pri"
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
        # secondary-node failures are only fatal unless explicitly ignored
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1796 a8083063 Iustin Pop
1797 a8083063 Iustin Pop
1798 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  Assembles all block devices of the instance; on failure, tears down
  whatever was partially activated and raises OpExecError.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # clean up any partially-activated devices before failing
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1810 fe7b0351 Michael Hanselmann
1811 fe7b0351 Michael Hanselmann
1812 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    Refuses to shut down the block devices while the instance is still
    running on its primary node.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list answer means the RPC failed and the node could not be
    # contacted; refuse to shut down the disks blindly in that case
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1847 a8083063 Iustin Pop
1848 a8083063 Iustin Pop
1849 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, failures on the primary node do not affect
  the return value (the shutdown is still attempted on every node; only
  the reported success changes).

  Returns True if every (non-ignored) device was shut down successfully.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # secondary-node failures always count; primary-node failures
        # count only when ignore_primary is not set
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1868 a8083063 Iustin Pop
1869 a8083063 Iustin Pop
1870 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    # normalize to the fully-expanded instance name
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # verify the target node has enough free memory before touching disks
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                freememory))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation if the start itself failed
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
1941 a8083063 Iustin Pop
1942 a8083063 Iustin Pop
1943 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    # normalize to the fully-expanded instance name
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # NOTE(review): this validation runs after the pre-hooks have already
    # fired; it would arguably belong in CheckPrereq
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                           constants.INSTANCE_REBOOT_HARD,
                           constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboot is delegated to the hypervisor on the node
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        # roll back the disk activation if the start failed
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2017 bf6929a2 Alexander Schreiber
2018 bf6929a2 Alexander Schreiber
2019 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(expanded_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    Marks the instance down and deactivates its disks even if the
    hypervisor-level shutdown call failed (the failure is only logged).

    """
    inst = self.instance
    pnode = inst.primary_node
    if not rpc.call_instance_shutdown(pnode, inst):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(inst.name)
    _ShutdownInstanceDisks(inst, self.cfg)
2062 a8083063 Iustin Pop
2063 a8083063 Iustin Pop
2064 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node itself, as the configuration may be stale
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    # os_type is optional; when given, the instance is switched to that OS
    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # the reinstall opcode has no 'pnode' attribute, so report the
        # instance's primary node instead of the (nonexistent) self.op.pnode
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      # persist the OS change before running the create scripts
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s "
                                 "on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the OS install failed
      _ShutdownInstanceDisks(inst, self.cfg)
2141 fe7b0351 Michael Hanselmann
2142 fe7b0351 Michael Hanselmann
2143 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and its IP is not already in use.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node itself, as the configuration may be stale
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    if not getattr(self.op, "ignore_ip", False):
      # if the new name's IP answers a ping, it is most likely in use
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # the config rename is not rolled back; only warn the admin
        msg = ("Could not run OS rename script for instance %s\n"
               "on node %s\n"
               "(but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2219 decd5f45 Iustin Pop
2220 decd5f45 Iustin Pop
2221 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # 'ignore_failures' is read in Exec(), so it must be declared as a
  # required opcode parameter (it was missing from this list)
  _OP_REQP = ["instance_name", "ignore_failures"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    # only the master runs the removal hooks
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    Raises:
      errors.OpPrereqError: if the instance name does not resolve to a
        known instance

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks and finally drops it from
    the cluster configuration.  With ignore_failures set, shutdown and
    disk-removal errors are reported via feedback_fn but do not abort
    the removal.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2278 a8083063 Iustin Pop
2279 a8083063 Iustin Pop
2280 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns one row (list of field values, in the requested order) per
    wanted instance.

    """
    wanted_names = self.wanted
    instances = [self.cfg.GetInstanceInfo(name) for name in wanted_names]

    # begin data gathering

    pnodes = frozenset([inst.primary_node for inst in instances])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field requested: ask the nodes
      live_data = {}
      node_data = rpc.call_all_instances_info(pnodes)
      for node_name in pnodes:
        node_result = node_data[node_name]
        if node_result:
          live_data.update(node_result)
        elif node_result == False:
          # an explicit False means the node could not be contacted
          bad_nodes.append(node_name)
        # else no instance is alive on that node

    else:
      live_data = dict([(name, {}) for name in wanted_names])

    # end data gathering

    # fields that are a plain attribute of the instance object
    simple_fields = {
      "name": "name",
      "os": "os",
      "pnode": "primary_node",
      "admin_ram": "memory",
      "disk_template": "disk_template",
      }

    output = []
    for inst in instances:
      row = []
      for field in self.op.output_fields:
        if field in simple_fields:
          val = getattr(inst, simple_fields[field])
        elif field == "snodes":
          val = list(inst.secondary_nodes)
        elif field == "admin_state":
          val = (inst.status != "down")
        elif field == "oper_state":
          if inst.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(inst.name))
        elif field == "oper_ram":
          if inst.primary_node in bad_nodes:
            val = None
          elif inst.name in live_data:
            val = live_data[inst.name].get("memory", "?")
          else:
            val = "-"
        elif field == "ip":
          val = inst.nics[0].ip
        elif field == "bridge":
          val = inst.nics[0].bridge
        elif field == "mac":
          val = inst.nics[0].mac
        elif field in ("sda_size", "sdb_size"):
          disk = inst.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
2378 a8083063 Iustin Pop
2379 a8083063 Iustin Pop
2380 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    layout supports failover, and that the target (secondary) node has
    enough free memory and the required bridges.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    # check memory requirements on the secondary node
    target_node = secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      # fixed: report the node name, not the whole RPC result dict
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % target_node)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        # best effort: the source node may already be dead
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2515 a8083063 Iustin Pop
2516 a8083063 Iustin Pop
2517 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  Every device of the tree is created unconditionally; children are
  created (depth-first) before their parent device.

  """
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not result:
    return False
  # remember the physical id the node assigned, if we had none yet
  if device.physical_id is None:
    device.physical_id = result
  return True
2536 a8083063 Iustin Pop
2537 a8083063 Iustin Pop
2538 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Recursively create a block device tree on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  if device.CreateOnSecondary():
    force = True

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create at this level
    return True

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not result:
    return False
  # remember the physical id the node assigned, if we had none yet
  if device.physical_id is None:
    device.physical_id = result
  return True
2566 a8083063 Iustin Pop
2567 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2568 923b1523 Iustin Pop
  """Generate a suitable LV name.
2569 923b1523 Iustin Pop

2570 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2571 923b1523 Iustin Pop

2572 923b1523 Iustin Pop
  """
2573 923b1523 Iustin Pop
  results = []
2574 923b1523 Iustin Pop
  for val in exts:
2575 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2576 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2577 923b1523 Iustin Pop
  return results
2578 923b1523 Iustin Pop
2579 923b1523 Iustin Pop
2580 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd device complete with its children.

  Builds a DRBD7 device backed by a data LV (of the requested size) and
  a fixed-size (128) metadata LV; names[0]/names[1] are the LV names.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_dev, meta_dev])
2594 a8083063 Iustin Pop
2595 a8083063 Iustin Pop
2596 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  Builds a DRBD8 device backed by a data LV (of the requested size) and
  a fixed-size (128) metadata LV; names[0]/names[1] are the LV names.

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
2611 a1f445d3 Iustin Pop
2612 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of top-level Disk objects (an 'sda' data disk and an
  'sdb' swap disk, except for "diskless" which returns an empty list),
  built according to template_name.

  NOTE(review): instance_name and primary_node are unused for the
  "diskless"/"plain"/"local_raid1" branches; primary_node is only used
  for the DRBD-based templates.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == "diskless":
    # no disks at all
    disks = []
  elif template_name == "plain":
    # plain LVs on the primary node only
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    # md RAID1 over two LVs, all on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")


    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    # md RAID1 on top of a DRBD7 device mirrored to one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # bare DRBD8 devices mirrored to one secondary node (no md layer)
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2686 a8083063 Iustin Pop
2687 a8083063 Iustin Pop
2688 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2689 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2690 3ecf6786 Iustin Pop

2691 3ecf6786 Iustin Pop
  """
2692 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2693 a0c3fea1 Michael Hanselmann
2694 a0c3fea1 Michael Hanselmann
2695 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    #HARDCODE
    # secondaries first, so mirrors have a target to attach to
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
2726 a8083063 Iustin Pop
2727 a8083063 Iustin Pop
2728 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal proces

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  success = True
  for top_disk in instance.disks:
    # walk the whole device tree on every node that hosts a piece of it
    for node, sub_disk in top_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(sub_disk, node)
      if not rpc.call_blockdev_remove(node, sub_disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (top_disk.iv_name, node))
        success = False
  return success
2755 a8083063 Iustin Pop
2756 a8083063 Iustin Pop
2757 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  This LU creates (and optionally starts) a new instance, either from
  scratch or by importing an exported instance image.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This validates the creation mode (and, for imports, the export
    data), the primary/secondary nodes, free disk space, the guest OS,
    the instance name/IP and the target bridge.

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        # BUGFIX: previously this formatted the whole nodeinfo dict into
        # the message; report the offending node name instead
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # ip handling: None/"none" disables the IP, "auto" resolves it from
    # the instance name, anything else must be a valid dotted quad
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3039 a8083063 Iustin Pop
3040 a8083063 Iustin Pop
3041 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(expanded_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    primary = inst.primary_node

    # query only the primary node for its list of running instances
    running = rpc.call_instance_list([primary])[primary]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % primary)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, primary))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(
      inst.name)
    # assemble the ssh command line as an argument list
    cmdline = ["ssh", "-q", "-t"]
    cmdline += list(ssh.KNOWN_HOSTS_OPTS)
    cmdline += list(ssh.BATCH_MODE_OPTS)
    cmdline += [primary, console_cmd]
    return "ssh", cmdline
3089 a8083063 Iustin Pop
3090 a8083063 Iustin Pop
3091 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    layout is remote_raid1, that the named device exists and that it
    does not already have two mirror slaves.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave"
                                 " devices.\n"
                                 "This would create a 3-disk raid1"
                                 " which we don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # rollback: drop the freshly created device on both nodes
      logger.Error("Can't add mirror component to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3204 a8083063 Iustin Pop
3205 a8083063 Iustin Pop
3206 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses the
    remote_raid1 layout, and that both the named device and the child
    component identified by self.op.disk_id exist; it also refuses to
    remove the last remaining mirror component.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the disk by its iv_name; the for/else raises if no match
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # locate the DRBD7 child whose port (logical_id[2]) matches disk_id
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0] and [1] are the two nodes of the DRBD pair; pick
    # whichever one is NOT the primary as the old secondary
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    # detach the child from the md device on the primary first
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # then remove the underlying device on both nodes of the pair;
    # failures here are logged but do not abort the operation
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)
3291 a8083063 Iustin Pop
3292 a8083063 Iustin Pop
3293 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  Depending on the instance's disk template and the requested mode,
  this either rebuilds the network mirror components (remote_raid1) or
  replaces the local storage / the secondary node of a drbd8 instance;
  see the _Exec* methods for the per-template algorithms.

  """
  # hooks directory name and hook target type for this LU
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode parameters that must be present before processing
  _OP_REQP = ["instance_name", "mode", "disks"]
3301 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    # replacement-specific variables; the generic per-instance
    # environment is merged on top (and thus takes precedence)
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))

    # the hooks run on the master and the primary, plus the new
    # secondary when one was explicitly requested
    run_nodes = [self.sstore.GetMasterNode(), self.instance.primary_node]
    new_secondary = self.op.remote_node
    if new_secondary is not None:
      run_nodes.append(new_secondary)
    return env, run_nodes, run_nodes
3320 a8083063 Iustin Pop
3321 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its disk
    template supports replacement, normalises the opcode parameters
    (instance name, remote node, replacement mode) and computes the
    node attributes (sec_node, tgt_node, oth_node, new_node) used by
    the Exec variants.

    """
    # expand a possibly-abbreviated instance name and fetch its config
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # store back the canonical (expanded) name
    self.op.instance_name = instance.name

    # only network-mirrored templates can have disks replaced
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # remote_node is optional in the opcode
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode for drbd7
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # tgt_node is where storage is replaced, oth_node is the peer
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # every requested disk name must belong to the instance
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # store back the normalised remote node (possibly reset to None)
    self.op.remote_node = remote_node
3401 a8083063 Iustin Pop
3402 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance (remote_raid1 / md+drbd variant).

    For every disk of the instance a new drbd branch is created on the
    primary and the chosen secondary, attached as an additional mirror
    component to the md device, synced, and only then is the old
    component detached and its backing devices removed.  Failures
    during creation trigger a manual-cleanup abort; failures during
    the final removal are logged and skipped.

    """
    instance = self.instance
    # maps iv_name -> (md device, old child, newly-created child)
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on"
                                 " secondary node %s\n"
                                 "Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        # roll back the freshly-created branch on both nodes
        logger.Error("Can't add mirror compoment to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      # persist after each disk so partial progress is recorded
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # field 5 of the blockdev_find result is the is_degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything is in sync: detach and delete the old components
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3497 a8083063 Iustin Pop
3498 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    # shorthands for progress reporting through the processor
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    # maps iv_name -> (drbd device, old LV children, new LV children)
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      # only disks selected in the opcode are touched
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # the drbd meta volume is a fixed 128 (MiB, presumably -- the
      # unit is not visible here; matches the data LV's 'size' units)
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption than logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      # maps an LV to its "<name>_replaced-<timestamp>" physical id
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # record the swapped names in the configuration objects
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # NOTE(review): the "%s" below is never substituted -- the
            # message is emitted verbatim; likely the device name was
            # meant to be interpolated.  Left as-is pending a check of
            # LogWarning's signature.
            warning("Can't rollback device %s", "manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # field 5 of the blockdev_find result is the is_degraded flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        # best effort: a failed removal is only reported, not fatal
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", "manually remove unused LVs")
          continue
3667 a9e0c397 Iustin Pop
3668 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3669 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3670 a9e0c397 Iustin Pop

3671 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3672 a9e0c397 Iustin Pop
      - for all disks of the instance:
3673 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3674 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3675 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3676 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3677 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3678 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3679 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3680 a9e0c397 Iustin Pop
          not network enabled
3681 a9e0c397 Iustin Pop
      - wait for sync across all devices
3682 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3683 a9e0c397 Iustin Pop

3684 a9e0c397 Iustin Pop
    Failures are not very well handled.
3685 0834c866 Iustin Pop

3686 a9e0c397 Iustin Pop
    """
3687 0834c866 Iustin Pop
    steps_total = 6
3688 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3689 a9e0c397 Iustin Pop
    instance = self.instance
3690 a9e0c397 Iustin Pop
    iv_names = {}
3691 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3692 a9e0c397 Iustin Pop
    # start of work
3693 a9e0c397 Iustin Pop
    cfg = self.cfg
3694 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3695 a9e0c397 Iustin Pop
    new_node = self.new_node
3696 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3697 0834c866 Iustin Pop
3698 0834c866 Iustin Pop
    # Step: check device activation
3699 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3700 0834c866 Iustin Pop
    info("checking volume groups")
3701 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3702 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3703 0834c866 Iustin Pop
    if not results:
3704 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3705 0834c866 Iustin Pop
    for node in pri_node, new_node:
3706 0834c866 Iustin Pop
      res = results.get(node, False)
3707 0834c866 Iustin Pop
      if not res or my_vg not in res:
3708 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3709 0834c866 Iustin Pop
                                 (my_vg, node))
3710 0834c866 Iustin Pop
    for dev in instance.disks:
3711 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3712 0834c866 Iustin Pop
        continue
3713 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3714 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3715 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3716 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3717 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3718 0834c866 Iustin Pop
3719 0834c866 Iustin Pop
    # Step: check other node consistency
3720 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3721 0834c866 Iustin Pop
    for dev in instance.disks:
3722 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3723 0834c866 Iustin Pop
        continue
3724 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3725 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3726 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3727 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3728 0834c866 Iustin Pop
                                 pri_node)
3729 0834c866 Iustin Pop
3730 0834c866 Iustin Pop
    # Step: create new storage
3731 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3732 a9e0c397 Iustin Pop
    for dev in instance.disks:
3733 a9e0c397 Iustin Pop
      size = dev.size
3734 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3735 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3736 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3737 a9e0c397 Iustin Pop
      # are talking about the secondary node
3738 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3739 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3740 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3741 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3742 a9e0c397 Iustin Pop
                                   " node '%s'" %
3743 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3744 a9e0c397 Iustin Pop
3745 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3746 0834c866 Iustin Pop
3747 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3748 0834c866 Iustin Pop
    for dev in instance.disks:
3749 0834c866 Iustin Pop
      size = dev.size
3750 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3751 a9e0c397 Iustin Pop
      # create new devices on new_node
3752 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3753 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3754 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3755 a9e0c397 Iustin Pop
                              children=dev.children)
3756 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3757 3f78eef2 Iustin Pop
                                        new_drbd, False,
3758 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3759 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3760 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3761 a9e0c397 Iustin Pop
3762 0834c866 Iustin Pop
    for dev in instance.disks:
3763 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3764 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3765 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3766 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3767 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3768 6852c52f Guido Trotter
                "Please cleanup this device manually as soon as possible")
3769 a9e0c397 Iustin Pop
3770 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
3771 642445d9 Iustin Pop
    done = 0
3772 642445d9 Iustin Pop
    for dev in instance.disks:
3773 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3774 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3775 642445d9 Iustin Pop
      # detach from network
3776 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3777 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3778 642445d9 Iustin Pop
      # standalone state
3779 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3780 642445d9 Iustin Pop
        done += 1
3781 642445d9 Iustin Pop
      else:
3782 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3783 642445d9 Iustin Pop
                dev.iv_name)
3784 642445d9 Iustin Pop
3785 642445d9 Iustin Pop
    if not done:
3786 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3787 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3788 642445d9 Iustin Pop
3789 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3790 642445d9 Iustin Pop
    # the instance to point to the new secondary
3791 642445d9 Iustin Pop
    info("updating instance configuration")
3792 642445d9 Iustin Pop
    for dev in instance.disks:
3793 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3794 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3795 642445d9 Iustin Pop
    cfg.Update(instance)
3796 a9e0c397 Iustin Pop
3797 642445d9 Iustin Pop
    # and now perform the drbd attach
3798 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3799 642445d9 Iustin Pop
    failures = []
3800 642445d9 Iustin Pop
    for dev in instance.disks:
3801 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3802 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
3803 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
3804 642445d9 Iustin Pop
      # is correct
3805 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3806 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3807 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3808 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3809 a9e0c397 Iustin Pop
3810 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3811 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3812 a9e0c397 Iustin Pop
    # return value
3813 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3814 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3815 a9e0c397 Iustin Pop
3816 a9e0c397 Iustin Pop
    # so check manually all the devices
3817 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3818 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3819 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3820 a9e0c397 Iustin Pop
      if is_degr:
3821 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3822 a9e0c397 Iustin Pop
3823 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3824 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3825 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3826 a9e0c397 Iustin Pop
      for lv in old_lvs:
3827 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3828 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3829 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3830 0834c866 Iustin Pop
                  "Cleanup stale volumes by hand")
3831 a9e0c397 Iustin Pop
3832 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler
    based on the instance's disk template (and, for DRBD8, on whether
    a new secondary node was requested).

    """
    instance = self.instance
    template = instance.disk_template
    if template == constants.DT_REMOTE_RAID1:
      handler = self._ExecRR1
    elif template == constants.DT_DRBD8:
      # with an explicit remote node we relocate the secondary,
      # otherwise we only replace the disks in place
      if self.op.remote_node is not None:
        handler = self._ExecD8Secondary
      else:
        handler = self._ExecD8DiskOnly
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return handler(feedback_fn)
3849 a9e0c397 Iustin Pop
3850 a8083063 Iustin Pop
3851 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        # the append must happen inside the loop; otherwise only the
        # last requested instance would be remembered
        self.wanted_instances.append(instance)
    else:
      # no explicit list given: query all instances in the cluster
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device on both the primary and the
    secondary node, recursing into the device's children.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      # the remote_info call tells us whether the instance is actually
      # running on its primary node
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      result[instance.name] = idict

    return result
3950 a8083063 Iustin Pop
3951 a8083063 Iustin Pop
3952 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
  """Modifies an instance's parameters.

  All changes are recorded in the configuration only; they take effect
  at the next restart of the instance.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # only the parameters actually being changed are passed as overrides
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge:
      # the hook env carries the full (ip, bridge) nic tuple, so fill
      # in the unchanged half from the current first nic
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      args['nics'] = [(ip, bridge)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # all parameters are optional on the opcode; missing ones mean
    # "leave unchanged"
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.bridge = getattr(self.op, "bridge", None)
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" removes the IP from the nic
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" %
                                 self.op.instance_name)
    # normalize to the expanded (full) instance name
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # collect (name, new-value) pairs for reporting back to the caller
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))

    # AddInstance also updates an existing instance in the config
    self.cfg.AddInstance(instance)

    return result
4051 a8083063 Iustin Pop
4052 a8083063 Iustin Pop
4053 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # an absent/empty node list means "all nodes"
    node_names = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, node_names)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4075 a8083063 Iustin Pop
4076 a8083063 Iustin Pop
4077 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # normalize to the expanded (full) node name
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The sequence is: optionally shut the instance down, snapshot its
    disks, restart it, then copy the snapshots to the target node and
    finally remove older exports of the same instance elsewhere.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      # only the first disk ("sda") is exported; a failed snapshot is
      # logged but does not abort the remaining steps
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if
      # we were the ones that shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    # copy each snapshot to the target node, then drop the snapshot on
    # the source node regardless of the copy result
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4189 5c947f38 Iustin Pop
4190 5c947f38 Iustin Pop
4191 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves the tag target (cluster, node or instance) and stores it
    in self.target; node/instance names are expanded in place.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
      return
    if kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.target = self.cfg.GetNodeInfo(name)
    elif kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.target = self.cfg.GetInstanceInfo(name)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4220 5c947f38 Iustin Pop
4221 5c947f38 Iustin Pop
4222 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Return the tags of the target resolved by TagsLU.CheckPrereq.

    """
    tags = self.target.GetTags()
    return tags
4233 5c947f38 Iustin Pop
4234 5c947f38 Iustin Pop
4235 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4236 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4237 73415719 Iustin Pop

4238 73415719 Iustin Pop
  """
4239 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4240 73415719 Iustin Pop
4241 73415719 Iustin Pop
  def CheckPrereq(self):
4242 73415719 Iustin Pop
    """Check prerequisites.
4243 73415719 Iustin Pop

4244 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4245 73415719 Iustin Pop

4246 73415719 Iustin Pop
    """
4247 73415719 Iustin Pop
    try:
4248 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4249 73415719 Iustin Pop
    except re.error, err:
4250 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4251 73415719 Iustin Pop
                                 (self.op.pattern, err))
4252 73415719 Iustin Pop
4253 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4254 73415719 Iustin Pop
    """Returns the tag list.
4255 73415719 Iustin Pop

4256 73415719 Iustin Pop
    """
4257 73415719 Iustin Pop
    cfg = self.cfg
4258 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4259 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4260 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4261 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4262 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4263 73415719 Iustin Pop
    results = []
4264 73415719 Iustin Pop
    for path, target in tgts:
4265 73415719 Iustin Pop
      for tag in target.GetTags():
4266 73415719 Iustin Pop
        if self.re.search(tag):
4267 73415719 Iustin Pop
          results.append((path, tag))
4268 73415719 Iustin Pop
    return results
4269 73415719 Iustin Pop
4270 73415719 Iustin Pop
4271 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4272 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4273 5c947f38 Iustin Pop

4274 5c947f38 Iustin Pop
  """
4275 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4276 5c947f38 Iustin Pop
4277 5c947f38 Iustin Pop
  def CheckPrereq(self):
4278 5c947f38 Iustin Pop
    """Check prerequisites.
4279 5c947f38 Iustin Pop

4280 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4281 5c947f38 Iustin Pop

4282 5c947f38 Iustin Pop
    """
4283 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4284 f27302fa Iustin Pop
    for tag in self.op.tags:
4285 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4286 5c947f38 Iustin Pop
4287 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4288 5c947f38 Iustin Pop
    """Sets the tag.
4289 5c947f38 Iustin Pop

4290 5c947f38 Iustin Pop
    """
4291 5c947f38 Iustin Pop
    try:
4292 f27302fa Iustin Pop
      for tag in self.op.tags:
4293 f27302fa Iustin Pop
        self.target.AddTag(tag)
4294 5c947f38 Iustin Pop
    except errors.TagError, err:
4295 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4296 5c947f38 Iustin Pop
    try:
4297 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4298 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4299 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4300 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4301 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4302 5c947f38 Iustin Pop
4303 5c947f38 Iustin Pop
4304 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    wanted = frozenset(self.op.tags)
    existing = self.target.GetTags()
    # every tag to be removed must currently be present
    missing = wanted - existing
    if missing:
      names = sorted(["'%s'" % tag for tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    target = self.target
    for del_tag in self.op.tags:
      target.RemoveTag(del_tag)
    # persist the modified target; a concurrent config change means the
    # caller has to retry the whole operation
    try:
      self.cfg.Update(target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")