Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 7df43a76

History | View | Annotate | Download (143.2 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 a8083063 Iustin Pop
class LogicalUnit(object):
  """Base class for all Logical Units.

  A Logical Unit is the master-side implementation of one opcode.
  Concrete subclasses are expected to:
    - implement CheckPrereq, which must also fill in all opcode
      fields (even if only with None)
    - implement Exec
    - implement BuildHooksEnv
    - override HPATH and HTYPE
    - optionally override the run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions
      regardless

  """
  HPATH = None        # hooks path; when None, BuildHooksEnv is never called
  HTYPE = None        # hooks type
  _OP_REQP = []       # opcode attributes that must be present and not None
  REQ_CLUSTER = True  # whether an initialized cluster is required
  REQ_MASTER = True   # whether we must be running on the master node

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    Validates that the opcode carries every attribute listed in
    _OP_REQP and that the REQ_CLUSTER/REQ_MASTER run requirements are
    satisfied. Subclasses override this to add opcode-specific
    validity checks.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    # All required opcode parameters must be present and non-None.
    for required in self._OP_REQP:
      if getattr(op, required, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   required)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    Verifies that the prerequisites for executing this LU hold. It may
    talk to other nodes, but must be idempotent - no cluster or system
    changes are allowed.

    Raises errors.OpPrereqError when something is not fulfilled; the
    return value is ignored.

    This method must also canonicalize all opcode parameters; e.g. a
    short node name must be fully expanded by the time it returns
    successfully (so that hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    Performs the actual work. Failures that are somewhat dealt with in
    code, or expected, are reported by raising errors.OpExecError.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Returns a three-element tuple: a dict with the environment used
    for running this LU's hook, a list of node names on which the hook
    runs before execution, and a list of node names on which it runs
    after execution.

    The dict keys must not carry the 'GANETI_' prefix - the hooks
    runner adds it (and further keys of its own). An LU that defines
    no environment must return an empty dict, never None.

    The master node must not appear in either node list; the hooks
    runner adds it when this LU requires a cluster to run on
    (otherwise there is no node list). Empty node sets are returned as
    empty lists, never None.

    When HPATH is None for an LU class, this method is never called.

    """
    raise NotImplementedError
145 a8083063 Iustin Pop
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  Parent class for LogicalUnits that run no hooks; exists purely to
  avoid duplicating the empty-hooks boilerplate.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    No hooks are run, so return an empty environment and empty
    pre/post node lists.

    """
    return {}, [], []
164 a8083063 Iustin Pop
165 a8083063 Iustin Pop
166 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  Drops both the full name and the short name of the given host from
  the /etc/hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  for entry in (host_info.name, host_info.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, entry)
173 c8a0948f Michael Hanselmann
174 c8a0948f Michael Hanselmann
175 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    lu: the LogicalUnit on whose behalf we operate (its cfg is used
      for name expansion)
    nodes: list of node names (an empty list selects all nodes)

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    # Empty selection means every node in the cluster.
    return utils.NiceSort(lu.cfg.GetNodeList())

  wanted = []
  for name in nodes:
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
197 3312b702 Iustin Pop
198 3312b702 Iustin Pop
199 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    lu: the LogicalUnit on whose behalf we operate (its cfg is used
      for name expansion)
    instances: list of instance names (an empty list selects all
      instances)

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # Empty selection means every instance in the cluster.
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)
  return utils.NiceSort(wanted)
221 dcb93971 Michael Hanselmann
222 dcb93971 Michael Hanselmann
223 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
224 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
225 83120a01 Michael Hanselmann

226 83120a01 Michael Hanselmann
  Args:
227 83120a01 Michael Hanselmann
    static: Static fields
228 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
229 83120a01 Michael Hanselmann

230 83120a01 Michael Hanselmann
  """
231 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
232 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
233 dcb93971 Michael Hanselmann
234 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
235 dcb93971 Michael Hanselmann
236 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
237 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
238 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
239 3ecf6786 Iustin Pop
                                          difference(all_fields)))
240 dcb93971 Michael Hanselmann
241 dcb93971 Michael Hanselmann
242 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
243 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
244 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
245 ecb215b5 Michael Hanselmann

246 ecb215b5 Michael Hanselmann
  Args:
247 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
248 396e1b78 Michael Hanselmann
  """
249 396e1b78 Michael Hanselmann
  env = {
250 0e137c28 Iustin Pop
    "OP_TARGET": name,
251 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
252 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
253 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
254 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
255 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
256 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
257 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
258 396e1b78 Michael Hanselmann
  }
259 396e1b78 Michael Hanselmann
260 396e1b78 Michael Hanselmann
  if nics:
261 396e1b78 Michael Hanselmann
    nic_count = len(nics)
262 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
263 396e1b78 Michael Hanselmann
      if ip is None:
264 396e1b78 Michael Hanselmann
        ip = ""
265 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
266 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
267 396e1b78 Michael Hanselmann
  else:
268 396e1b78 Michael Hanselmann
    nic_count = 0
269 396e1b78 Michael Hanselmann
270 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
271 396e1b78 Michael Hanselmann
272 396e1b78 Michael Hanselmann
  return env
273 396e1b78 Michael Hanselmann
274 396e1b78 Michael Hanselmann
275 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    dict of environment variables, as built by _BuildInstanceHookEnv

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: 'status' was previously filled from instance.os, which
    # duplicated the OS type (already exported via 'os_type') into
    # INSTANCE_STATUS; the instance run status belongs here.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
295 396e1b78 Michael Hanselmann
296 396e1b78 Michael Hanselmann
297 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Scans the cluster known_hosts file, keeping a matching entry if one
  exists, discarding entries that partially match (same host/ip but a
  different key, or vice versa), and adding a fresh entry if needed.
  If any entry was discarded the whole file is rewritten atomically
  via a temp file + rename; otherwise a new entry is simply appended.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open read-write so we can append in place later; create the file
  # if it does not exist yet.
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False          # exact entry (host+ip+key) already present

  save_lines = []          # lines to keep on a rewrite
  add_lines = []           # new entries to add
  removed = False          # at least one stale entry was dropped

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    # (fewer than 3 whitespace-separated fields, or comment lines;
    # a known_hosts entry is "hostnames keytype key").
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      fields = parts[0].split(',')   # comma-separated host names/ips
      key = parts[2]

      # haveall: both ip and fullnode appear in this entry;
      # havesome: at least one of them does.
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # Complete, correct entry - keep it and remember we have one.
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # Partially matching or wrong-key entry - drop it so it can be
        # replaced by a fresh one.
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    # Unrelated or unparseable line - always preserved.
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # (os.rename is atomic, so readers never see a partial file; the
    # temp file lives in DATA_DIR so the rename stays on one
    # filesystem.)
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
374 a8083063 Iustin Pop
375 a8083063 Iustin Pop
376 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
377 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
378 a8083063 Iustin Pop

379 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
380 a8083063 Iustin Pop
  is the error message.
381 a8083063 Iustin Pop

382 a8083063 Iustin Pop
  """
383 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
384 a8083063 Iustin Pop
  if vgsize is None:
385 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
386 a8083063 Iustin Pop
  elif vgsize < 20480:
387 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
388 191a8385 Guido Trotter
            (vgname, vgsize))
389 a8083063 Iustin Pop
  return None
390 a8083063 Iustin Pop
391 a8083063 Iustin Pop
392 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  Generates a fresh dsa keypair for root (any existing key files are
  backed up and removed first) and registers the new public key in
  root's authorized_keys file.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Preserve any pre-existing key material before wiping it.
  for key_path in (priv_key, pub_key):
    if os.path.exists(key_path):
      utils.CreateBackup(key_path)
    utils.RemoveFile(key_path)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  pub_fd = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, pub_fd.read(8192))
  finally:
    pub_fd.close()
422 a8083063 Iustin Pop
423 a8083063 Iustin Pop
424 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the ssconf.SimpleStore used to persist the node daemon password

  Raises:
    errors.OpExecError: if certificate generation fails or the node
      daemon cannot be restarted

  """
  # Create pseudo random password
  # NOTE(review): the 'sha' module is deprecated in later Python
  # versions in favour of hashlib; kept as-is for this codebase.
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Generate a self-signed certificate valid for ~5 years; the same
  # file receives both the private key (-keyout) and the certificate
  # (-out).
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # The file contains the private key, so restrict it to owner
  # read-only.
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # Restart the node daemon via its init.d script.
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
453 a8083063 Iustin Pop
454 a8083063 Iustin Pop
455 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Verifies, via RPC to the instance's primary node, that every bridge
  referenced by the instance's NICs is present there; raises
  errors.OpPrereqError otherwise.

  """
  # check bridges existance
  bridges = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
465 bf6929a2 Alexander Schreiber
466 bf6929a2 Alexander Schreiber
467 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  This LU bootstraps a new cluster on the current host: it validates
  the host/cluster names and the requested parameters in CheckPrereq,
  then in Exec writes the simple store, generates the node daemon
  credentials, starts the master IP, sets up ssh/known_hosts and
  /etc/hosts, and writes the initial cluster configuration.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  # No cluster exists yet, so skip the usual cluster/master checks.
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    # self.hostname is set by CheckPrereq, which runs before hooks.
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Also validates IP resolution, the secondary IP (if given), the
    volume group, mac prefix, hypervisor type, master netdev and the
    node init.d script. Stores self.hostname, self.clustername and
    self.secondary_ip for Exec.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    self.hostname = hostname = utils.HostInfo()

    # A name resolving to loopback would make the node unreachable for
    # the rest of the cluster.
    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # The resolved IP must actually be configured on this host.
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    # The same reachability check for the secondary IP, unless it is
    # absent or equal to the primary one.
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                 "but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    # The mac prefix must be three colon-separated hex bytes.
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    # The master netdev must exist ('ip link show' succeeding is the
    # check).
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    # Exec will restart the node daemon through this script, so it
    # must exist and be executable.
    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not "
                                 "executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Performs the actual cluster bootstrap; relies on the values
    validated and stored by CheckPrereq.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # Second field of the host key file is the key itself.
    sshkey = sshline.split(" ")[1]

    hi = utils.HostInfo(name=hostname.name)
    utils.AddEtcHostsEntry(constants.ETC_HOSTS, hostname.name, hi.ip)
    utils.AddEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName(), hi.ip)
    del hi

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
591 a8083063 Iustin Pop
592 a8083063 Iustin Pop
593 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty, i.e. that the only
    remaining node is the master and no instances are defined.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_name = self.sstore.GetMasterNode()

    remaining_nodes = self.cfg.GetNodeList()
    node_count = len(remaining_nodes)
    if node_count != 1 or remaining_nodes[0] != master_name:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (node_count - 1))
    remaining_instances = self.cfg.GetInstanceList()
    if remaining_instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(remaining_instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Backs up the cluster ssh key pair, tells the master node to leave
    the cluster and removes its entry from /etc/hosts.

    """
    master_name = self.sstore.GetMasterNode()
    # GetUserFiles returns (private key, public key, authorized_keys);
    # only the key pair itself is backed up here
    key_files = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in key_files[:2]:
      utils.CreateBackup(key_file)
    rpc.call_node_leave_cluster(master_name)
    _RemoveHostFromEtcHosts(master_name)
628 a8083063 Iustin Pop
629 a8083063 Iustin Pop
630 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  Walks every node and instance in the configuration, compares the
  recorded state against what the nodes report over RPC, and emits
  human-readable "  - ERROR: ..." lines through the feedback function.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data returned by the node (false value if
        the query failed)
      node_result: dict of results from the node-verify RPC call
      remote_version: protocol version reported by the node
      feedback_fn: callable taking a single message string

    Returns:
      True if any problem was detected on this node, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version at all means the node was unreachable; further checks
      # would be meaningless, so report and bail out early
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      # _HasValidVG returns an error description, or a false value when ok
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE(review): this loop variable shadows the 'node' parameter;
        # harmless since the parameter is not used afterwards, but worth
        # renaming at some point
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      # hypervisor verify failures are reported but do not set 'bad'
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, that the instance is running on
    its primary node (unless marked down), and that it is not running
    anywhere else.

    Args:
      instance: instance name to verify
      node_vol_is: dict of node name -> volumes actually present
      node_instance: dict of node name -> instances actually running
      feedback_fn: callable taking a single message string

    Returns:
      True if any problem was detected, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    # map of node name -> volumes this instance should have there
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # instances not marked 'down' must actually run on their primary
    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # ...and must not be running on any other node
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Args:
      node_vol_should: dict of node name -> volumes expected there
      node_vol_is: dict of node name -> volumes actually present

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any unknown running instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns:
      0 if the cluster verified cleanly, 1 if any problem was found.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # query all nodes up-front; per-node results are picked out below
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      # a non-dict result means the volume-list RPC to this node failed
      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    # accumulated map of node name -> volumes all instances need there
    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
871 a8083063 Iustin Pop
872 a8083063 Iustin Pop
873 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Returns the hook environment plus the pre/post node lists (both
    consisting of just the master node).

    """
    env = {
      # FIX: the cluster name lives in the LU's simple store, not on the
      # opcode; "self.op.sstore" would raise AttributeError when the
      # hooks environment is built.  Every other LU in this file uses
      # self.sstore (see self.sstore.GetMasterNode() below).
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name, checks that either the name or the IP
    actually changes, and that the new IP is not already reachable on
    the network (which would indicate an address clash).

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # a successful fping means something already answers on the new IP
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master IP, updates the cluster name and master IP in the
    simple store, distributes the changed ssconf files to all other
    nodes, and finally restarts the master IP.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes (the master already
      # has it, so it is excluded from the distribution list)
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restore the master role, even if the rename failed
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,\n"
                     "please restart manually.")
950 07bd8a51 Iustin Pop
951 07bd8a51 Iustin Pop
952 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Repeatedly queries the instance's primary node for the mirror status
  of all its disks, logging progress through 'proc', until every device
  reports completion (or, with oneshot, after the first successful
  poll).

  Args:
    cfgw: configuration object, used to set the disks' physical IDs
    instance: the instance object whose disks are watched
    proc: processor-like object providing LogInfo/LogWarning
    oneshot: if True, stop after a single successful status poll
    unlock: if True, drop the 'cmd' lock while sleeping between polls

  Returns:
    True if no device ended up degraded, False otherwise.

  """
  if not instance.disks:
    # nothing to sync
    return True

  if not oneshot:
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  # 'retries' counts *consecutive* failures to get any data from the
  # node; it is reset to zero as soon as one poll succeeds
  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      proc.LogWarning("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      # rstats entries parallel instance.disks; None means that device's
      # status could not be computed
      mstat = rstats[i]
      if mstat is None:
        proc.LogWarning("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # we ignore the ldisk parameter
      perc_done, est_time, is_degraded, _ = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        # a percentage means this device is still syncing
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
                     (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # release the command lock while sleeping, so other commands can run
    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1014 a8083063 Iustin Pop
1015 a8083063 Iustin Pop
1016 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1017 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1018 a8083063 Iustin Pop

1019 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1020 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1021 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1022 0834c866 Iustin Pop

1023 a8083063 Iustin Pop
  """
1024 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1025 0834c866 Iustin Pop
  if ldisk:
1026 0834c866 Iustin Pop
    idx = 6
1027 0834c866 Iustin Pop
  else:
1028 0834c866 Iustin Pop
    idx = 5
1029 a8083063 Iustin Pop
1030 a8083063 Iustin Pop
  result = True
1031 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1032 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1033 a8083063 Iustin Pop
    if not rstats:
1034 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
1035 a8083063 Iustin Pop
      result = False
1036 a8083063 Iustin Pop
    else:
1037 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1038 a8083063 Iustin Pop
  if dev.children:
1039 a8083063 Iustin Pop
    for child in dev.children:
1040 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1041 a8083063 Iustin Pop
1042 a8083063 Iustin Pop
  return result
1043 a8083063 Iustin Pop
1044 a8083063 Iustin Pop
1045 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    Nothing to check: this is a pure query LU and always succeeds.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Queries every configured node for its OS diagnosis data and
    returns the combined result.

    """
    all_nodes = self.cfg.GetNodeList()
    os_data = rpc.call_os_diagnose(all_nodes)
    # note: an explicit False signals RPC failure; an empty result is
    # still a valid answer, so we must not use plain truthiness here
    if os_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return os_data
1068 a8083063 Iustin Pop
1069 a8083063 Iustin Pop
1070 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    # run the hooks on every node except the one being removed
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # FIX: replaced the deprecated "raise Class, args" statement form
      # with the call form used by every other raise in this module
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # store the expanded (canonical) name and the node object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Tells the node to leave the cluster, stops its node daemon over
    ssh, removes it from the configuration and from /etc/hosts.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
1143 c8a0948f Michael Hanselmann
1144 a8083063 Iustin Pop
1145 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields whose values must be gathered live from the nodes via RPC
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per node, each row holding the values
    of self.op.output_fields in order.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field was requested, so query the nodes
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          live_data[name] = {}
    else:
      # use a distinct empty dict per node; the previous
      # dict.fromkeys(nodenames, {}) shared a single dict instance
      # between all nodes, which is a latent aliasing bug
      live_data = dict([(name, {}) for name in nodenames])

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      # instance data was requested, build the per-node instance maps
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          # live fields may be missing if the node didn't answer
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1241 a8083063 Iustin Pop
1242 a8083063 Iustin Pop
1243 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns one row per logical volume found on the queried nodes,
    each row holding the string values of self.op.output_fields.

    """
    nodenames = self.nodes
    # per-node volume lists, gathered live from the nodes
    volumes = rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # map each instance to its {node: [lv names]} layout, used below to
    # attribute a volume to the instance owning it
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # skip nodes that didn't answer or have no volumes
      if node not in volumes or not volumes[node]:
        continue

      # sort a copy so the RPC result is not mutated
      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV on this node; the
            # for/else yields '-' when no instance matches
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1311 dcb93971 Michael Hanselmann
1312 dcb93971 Michael Hanselmann
1313 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; raises if the host is unresolvable
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # secondary_ip is optional on the opcode; default to single-homed
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # refuse IPs already used by any configured node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError(
          "Node secondary ip not reachable by TCP based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Pushes the node daemon password and SSL certificate to the new
    node, restarts its node daemon, distributes ssh keys and cluster
    files, and finally adds the node to the configuration.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is interpolated into a shell command below, so it
    # must only contain characters safe inside single quotes
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # NOTE(review): fixed delay, presumably to let the restarted node
    # daemon come up before the version RPC below -- no readiness poll
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # order matters: call_node_add below expects dsa priv/pub,
    # rsa priv/pub, cluster priv/pub in exactly this sequence
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    hi = utils.HostInfo(name=new_node.name)
    utils.AddEtcHostsEntry(constants.ETC_HOSTS, new_node.name, hi.ip)
    utils.AddEtcHostsEntry(constants.ETC_HOSTS, hi.ShortName(), hi.ip)
    del hi

    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      # verify the node can actually bind its claimed secondary ip
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the"
                                 " secondary ip you gave (%s).\n"
                                 "Please fix and re-run this command." %
                                 new_node.secondary_ip)

    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s.\n"
                               "Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort distribution: log but do not abort
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1535 a8083063 Iustin Pop
1536 a8083063 Iustin Pop
1537 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      # message fix: "This commands" -> "This command"
      raise errors.OpPrereqError("This command must be run on the node"
                                 " where you want the new master to be.\n"
                                 "%s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    if not rpc.call_node_stop_master(self.old_master):
      # message fix: the original read "could disable", dropping the
      # negation and making the error misleading
      logger.Error("could not disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # record the new master in the simple store and push it out, so
    # all nodes agree on who the master is
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")
1605 a8083063 Iustin Pop
1606 a8083063 Iustin Pop
1607 a8083063 Iustin Pop
1608 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """This LU has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    Returns a dict with the cluster name, master node, software and
    protocol versions, and the architecture of the local machine.

    """
    result = {}
    # cluster identity
    result["name"] = self.sstore.GetClusterName()
    result["master"] = self.sstore.GetMasterNode()
    # version information
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    # architecture of the machine running this LU
    result["architecture"] = (platform.architecture()[0], platform.machine())

    return result
1637 a8083063 Iustin Pop
1638 a8083063 Iustin Pop
1639 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the named file exists locally and that the node list
    resolves to valid cluster nodes.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Copies self.op.filename over ssh to every selected node except the
    local one; failures are logged but do not abort the operation.

    """
    filename = self.op.filename
    myname = utils.HostInfo().name

    # push to every requested node except ourselves
    for target in [name for name in self.nodes if name != myname]:
      if not ssh.CopyFileToNode(target, filename):
        logger.Error("Copy of file %s to node %s failed" % (filename, target))
1676 a8083063 Iustin Pop
1677 a8083063 Iustin Pop
1678 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Dump the cluster configuration in text form.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites to check for this LU.

    """

  def Exec(self, feedback_fn):
    """Return the serialized representation of the cluster config.

    """
    config_text = self.cfg.DumpConfig()
    return config_text
1695 a8083063 Iustin Pop
1696 a8083063 Iustin Pop
1697 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Execute a command on a set of cluster nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This expands and validates the requested node list.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run the command on each node, as root, collecting the results.

    Returns a list of (node, output, exit_code) tuples.

    """
    results = []
    for node_name in self.nodes:
      ssh_result = ssh.SSHCall(node_name, "root", self.op.command)
      results.append((node_name, ssh_result.output, ssh_result.exit_code))
    return results
1721 a8083063 Iustin Pop
1722 a8083063 Iustin Pop
1723 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up the block devices of an instance.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This verifies that the named instance exists in the cluster
    configuration.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Assemble the instance's disks, failing hard on any error.

    Returns the device information produced by _AssembleInstanceDisks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
1752 a8083063 Iustin Pop
1753 a8083063 Iustin Pop
1754 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the configuration object, used to set the per-node disk IDs
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    A (disks_ok, device_info) tuple: disks_ok is false if assembly
    failed on any node whose errors were not ignored; device_info is a
    list of (host, instance_visible_name, assemble_result) tuples, one
    per disk, with the assembly result on the primary node.
  """
  device_info = []
  disks_ok = True
  for inst_disk in instance.disks:
    # assembly result on the primary node, for this disk
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk,
                                          instance.name, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s (is_pri"
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
        # primary failures always count; secondary failures count only
        # when ignore_secondaries is off
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1795 a8083063 Iustin Pop
1796 a8083063 Iustin Pop
1797 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Assemble the disks of an instance, raising on failure.

  On assembly failure the already-assembled devices are shut down
  again before an OpExecError is raised; when force is explicitly
  False, a hint about retrying with '--force' is logged as well.

  """
  disks_ok, _ = _AssembleInstanceDisks(instance, cfg,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1809 fe7b0351 Michael Hanselmann
1810 fe7b0351 Michael Hanselmann
1811 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    This refuses to shut down the block devices while the instance is
    still running on its primary node.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list answer means the RPC to the node failed
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1846 a8083063 Iustin Pop
1847 a8083063 Iustin Pop
1848 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  (i.e. they do not cause a False return value); errors on secondary
  nodes always make the function return False.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # secondary-node failures always count; primary-node failures
        # count only when ignore_primary is off
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1867 a8083063 Iustin Pop
1868 a8083063 Iustin Pop
1869 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    network bridges exist.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    Checks the free memory on the primary node, assembles the disks
    and then asks the node to start the instance; if starting fails,
    the disks are shut down again.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # make sure the primary node has enough free memory for the instance
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                freememory))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
1940 a8083063 Iustin Pop
1941 a8083063 Iustin Pop
1942 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the requested reboot type is valid and that the
    instance is in the cluster.

    """
    # validate the reboot type up front, instead of failing in Exec
    # after the prereq/hook phases have already run
    if self.op.reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the node via a single RPC;
    a full reboot is emulated as shutdown + disk restart + start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # INSTANCE_REBOOT_FULL: stop the instance completely and start it again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2016 bf6929a2 Alexander Schreiber
2017 bf6929a2 Alexander Schreiber
2018 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Stop a running instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build the hooks environment.

    The hooks run on the master and on the instance's primary and
    secondary nodes.

    """
    hook_env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return hook_env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This verifies that the instance exists in the cluster
    configuration.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shut the instance down and deactivate its disks.

    A failed shutdown RPC is only logged; the instance is marked down
    and its disks are shut down regardless.

    """
    instance = self.instance
    primary = instance.primary_node
    if not rpc.call_instance_shutdown(primary, instance):
      logger.Error("could not shutdown instance")
    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2061 a8083063 Iustin Pop
2062 a8083063 Iustin Pop
2063 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and (when an OS change is requested) that the new OS is supported
    by the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the config may say "down" while the instance is in fact still
    # running on the node; double-check via RPC
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # this opcode has no 'pnode' attribute, so report the
        # instance's primary node (the original code referenced
        # self.op.pnode, which would itself raise AttributeError)
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally switches the instance's OS, then re-runs the OS create
    scripts with the disks temporarily activated.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s "
                                 "on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # make sure the disks are shut down again even if the OS add failed
      _ShutdownInstanceDisks(inst, self.cfg)
2140 fe7b0351 Michael Hanselmann
2141 fe7b0351 Michael Hanselmann
2142 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and its IP is not already in use
    (unless ignore_ip is set).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the config may say "down" while the instance is in fact still
    # running on the node; double-check via RPC
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    if not getattr(self.op, "ignore_ip", False):
      # fping exits with 0 (result.failed is false) when the IP answers,
      # i.e. when it is already in use
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    The instance is renamed in the configuration first; then the OS
    rename script is run on the primary node with the disks activated.
    A failing rename script is only logged, since the configuration
    rename has already happened.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        msg = ("Could not run OS rename script for instance %s\n"
               "on node %s\n"
               "(but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2218 decd5f45 Iustin Pop
2219 decd5f45 Iustin Pop
2220 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs only on the master node (the instance's own nodes are
    going away, so they are not notified).

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its block devices and finally
    drops it from the cluster configuration.  With ignore_failures
    set on the opcode, shutdown/disk-removal errors only produce
    warnings and the config removal still proceeds.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    # 'ignore_failures' is not listed in _OP_REQP, so the attribute may
    # be absent from the opcode; default to False instead of dying with
    # AttributeError halfway through the removal
    ignore_failures = getattr(self.op, "ignore_failures", False)

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2277 a8083063 Iustin Pop
2278 a8083063 Iustin Pop
2279 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields whose values require a live RPC query to the nodes, as
    # opposed to the static ones read from the configuration
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # expand/validate the requested instance names (empty list = all)
    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # only pay the cost of the RPC if a dynamic field was requested
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # False (as opposed to an empty dict or None) marks an RPC
          # failure: the node itself could not be queried
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # configured (desired) state, not the live one
          val = (instance.status != "down")
        elif field == "oper_state":
          # live state; None means "unknown" (primary node unreachable)
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            # node is fine but the instance is not running there
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk's iv_name ("sda"/"sdb")
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2377 a8083063 Iustin Pop
2378 a8083063 Iustin Pop
2379 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a network
    mirrored disk template, and that the target (secondary) node has
    enough free memory and the required bridges.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    # check memory requirements on the secondary node
    target_node = secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      # bug fix: the old code interpolated the whole 'nodeinfo' dict
      # into the message instead of the node's name
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % target_node)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    # re-check memory at execution time: the cluster state may have
    # changed since CheckPrereq ran
    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        # best effort: carry on even if the source node did not respond,
        # the operator asserts the node is really down
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2514 a8083063 Iustin Pop
2515 a8083063 Iustin Pop
2516 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  On the primary node every device of the tree must exist, so this
  always creates all devices.  Returns True on success, False as soon
  as any device in the tree fails to be created.

  """
  # depth-first: children must exist before the parent device
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not result:
    return False
  # remember the node-assigned id the first time the device is created
  if device.physical_id is None:
    device.physical_id = result
  return True
2535 a8083063 Iustin Pop
2536 a8083063 Iustin Pop
2537 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a block device tree on a secondary node.

  Devices that declare they must exist on secondaries are created,
  together with their whole subtree; otherwise only the children are
  visited, propagating the same 'force' value.  Returns True on
  success, False on the first creation failure.

  """
  # once a device requires secondary creation, its whole subtree does too
  if device.CreateOnSecondary():
    force = True

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create at this level
    return True

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not result:
    return False
  # remember the node-assigned id the first time the device is created
  if device.physical_id is None:
    device.physical_id = result
  return True
2565 a8083063 Iustin Pop
2566 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2567 923b1523 Iustin Pop
  """Generate a suitable LV name.
2568 923b1523 Iustin Pop

2569 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2570 923b1523 Iustin Pop

2571 923b1523 Iustin Pop
  """
2572 923b1523 Iustin Pop
  results = []
2573 923b1523 Iustin Pop
  for val in exts:
2574 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2575 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2576 923b1523 Iustin Pop
  return results
2577 923b1523 Iustin Pop
2578 923b1523 Iustin Pop
2579 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate one drbd7 mirror branch with its backing volumes.

  The branch is a drbd device of the given size whose children are the
  data LV (names[0], 'size' MB) and the metadata LV (names[1], 128 MB).

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # backing logical volumes: data first, then the fixed-size metadata
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv])
2593 a8083063 Iustin Pop
2594 a8083063 Iustin Pop
2595 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate one drbd8 device with its backing volumes.

  The device (exposed under 'iv_name') mirrors between 'primary' and
  'secondary' over a freshly allocated port; its children are the data
  LV (names[0], 'size' MB) and the metadata LV (names[1], 128 MB).

  """
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # backing logical volumes: data first, then the fixed-size metadata
  data_lv = objects.Disk(dev_type=constants.LD_LV, size=size,
                         logical_id=(vgname, names[0]))
  meta_lv = objects.Disk(dev_type=constants.LD_LV, size=128,
                         logical_id=(vgname, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv],
                      iv_name=iv_name)
2610 a1f445d3 Iustin Pop
2611 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Builds the sda (disk_sz) and sdb (swap_sz) device trees for the
  requested template and returns them as a list of Disk objects;
  raises ProgrammerError for an unknown template or a secondary-node
  count that does not match the template.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == "diskless":
    disks = []
  elif template_name == "plain":
    # plain LVs live only on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    # md RAID1 over two LVs, all on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")


    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    # md over a drbd7 device mirrored to one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # drbd8 devices mirrored to one secondary node, no md layer
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2685 a8083063 Iustin Pop
2686 a8083063 Iustin Pop
2687 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2688 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2689 3ecf6786 Iustin Pop

2690 3ecf6786 Iustin Pop
  """
2691 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2692 a0c3fea1 Michael Hanselmann
2693 a0c3fea1 Michael Hanselmann
2694 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for device in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (device.iv_name, instance.name))
    # secondaries first, so network-mirrored devices find their peers
    #HARDCODE
    for secondary_node in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(cfg, secondary_node, instance,
                                           device, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (device.iv_name, device, secondary_node))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                       instance, device, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   device.iv_name)
      return False

  return True
2725 a8083063 Iustin Pop
2726 a8083063 Iustin Pop
2727 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal proces

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if rpc.call_blockdev_remove(node, disk):
        continue
      # best effort: log and keep removing the remaining devices
      logger.Error("could not remove block device %s on node %s,"
                   " continuing anyway" %
                   (device.iv_name, node))
      all_removed = False
  return all_removed
2754 a8083063 Iustin Pop
2755 a8083063 Iustin Pop
2756 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Validates all the creation parameters (mode, nodes, disk space, OS,
  name/IP, bridge), then creates the disks, registers the instance in
  the configuration, optionally waits for disk sync, installs/imports
  the OS and optionally starts the instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    Verifies the creation mode (and, for imports, the export data),
    expands and checks the primary/secondary nodes, free disk space,
    guest OS availability, instance name/IP and target bridge. Fills
    in self.pnode, self.secondaries, self.inst_ip, self.instance_status
    (and self.src_image for imports) for use in BuildHooksEnv/Exec.

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      # network-mirrored templates need exactly one secondary node
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        # FIX: report the offending node name, not the whole nodeinfo dict
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # the instance IP may be given explicitly, derived from the resolved
    # name ("auto"), or disabled ("none"/missing)
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable instance IP means the address is already taken
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, adds the instance to the configuration, waits
    for (or checks) disk sync, runs the OS create/import scripts and
    optionally starts the instance. On disk failures the already
    created devices are removed again.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back whatever devices were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disk creation and the config entry
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3038 a8083063 Iustin Pop
3039 a8083063 Iustin Pop
3040 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This LU is somewhat special: it does not open the console itself, it
  only computes and returns the ssh command line that has to be run on
  the master node in order to reach the console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Verify that the target instance exists in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(expanded_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Compute the ssh command line for the instance's console.

    """
    inst = self.instance
    pnode = inst.primary_node

    # ask the primary node which instances it is currently running
    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(
      inst.name)
    # assemble the ssh invocation to be executed on the master
    argv = (["ssh", "-q", "-t"] +
            list(ssh.KNOWN_HOSTS_OPTS) +
            list(ssh.BATCH_MODE_OPTS) +
            [pnode, console_cmd])
    return "ssh", argv
3088 a8083063 Iustin Pop
3089 a8083063 Iustin Pop
3090 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Creates a new DRBD device (primary + the given remote node) and
  attaches it as an additional child of one md mirror of a
  remote_raid1 instance, then waits for the mirror to sync.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the remote
    node exists and is not the primary, that the instance uses the
    remote_raid1 template, and that the named disk exists and has a
    free mirror slot. Stores the instance, remote node and disk on
    self for Exec().

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # for/else: the else branch runs only if no disk matched (no break)
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # at most two mirror members are supported per md device
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave"
                                 " devices.\n"
                                 "This would create a 3-disk raid1"
                                 " which we don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component.

    Creates the new DRBD branch on the secondary first, then on the
    primary, and finally asks the primary to attach it to the md
    device; on failure each step rolls back the previous ones.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    # new LV names for the data and metadata volumes of the DRBD branch
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    # step 1: create the device on the secondary node
    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    # step 2: create the device on the primary node
    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # attach failed: roll back the device on both nodes (best-effort)
      logger.Error("Can't add mirror compoment to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    # record the new child in the configuration
    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    # block until the mirror has resynced
    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3203 a8083063 Iustin Pop
3204 a8083063 Iustin Pop
3205 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  Detaches one DRBD child (identified by disk name and port/disk_id)
  from an instance's md mirror and removes the underlying device from
  both nodes it lives on.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses the
    remote_raid1 template, and that the named disk has a matching
    DRBD7 child — which must not be the mirror's last member. Stores
    the disk, the child and the node being dropped on self.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # for/else: raises only if no disk matched the given name
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # find the DRBD7 child whose port (logical_id[2]) matches disk_id
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0:2] are the two node names; pick the one that is not
    # the primary as the secondary being removed
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component.

    Detaches the child from the md device on the primary, then removes
    the child's device from both of its nodes (best-effort) and drops
    it from the configuration.

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # remove the physical device on each of the child's two nodes;
    # failures are logged but do not abort the operation
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    # update the configuration to forget the removed child
    disk.children.remove(child)
    self.cfg.AddInstance(instance)
3290 a8083063 Iustin Pop
3291 a8083063 Iustin Pop
3292 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  # NOTE(review): these class attributes are consumed by the LogicalUnit
  # framework (hooks path/type and required opcode fields) -- confirm
  # against the LogicalUnit base class, which is outside this chunk
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]
3299 a8083063 Iustin Pop
3300 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    # replacement-specific variables first, then the generic
    # per-instance hook environment on top
    env = {}
    env["MODE"] = self.op.mode
    env["NEW_SECONDARY"] = self.op.remote_node
    env["OLD_SECONDARY"] = self.instance.secondary_nodes[0]
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master and the primary; the new secondary is
    # included only when one was requested
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return env, nl, nl
3319 a8083063 Iustin Pop
3320 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    It also normalizes self.op.mode / self.op.remote_node and computes
    the helper attributes (sec_node, remote_node_info, and for drbd8
    tgt_node/oth_node/new_node) used by the Exec variants.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # store back the expanded (canonical) name
    self.op.instance_name = instance.name

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    # remote_node is optional on the opcode, hence the getattr
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # this is for DRBD8, where we can't execute the same mode of
        # replacement as for drbd7 (no different port allocated)
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode for drbd7
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # switch to replace secondary mode
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd8' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd8' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        # primary replacement: work on the primary, peer is the secondary
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    # validate the requested disk names against the instance's disks
    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # store back the possibly-normalized remote node (may be None now)
    self.op.remote_node = remote_node
3400 a8083063 Iustin Pop
3401 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance.

    This is the remote_raid1 variant: for each disk, a new drbd branch
    is created on the (possibly new) secondary and on the primary,
    attached to the md mirror, synced, and only then is the old child
    detached and its devices removed.

    """
    instance = self.instance
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      # no new secondary requested: mirror back onto the current one
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      # remember (md device, old child, new child) for the later phases
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on"
                                 " secondary node %s\n"
                                 "Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        # FIX: typo in the log message ("compoment" -> "component")
        logger.Error("Can't add mirror component to md!")
        # best-effort rollback of the two devices we just created
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # NOTE(review): index 5 of the blockdev_find result is presumably
      # the is_degraded flag -- confirm against the rpc/backend code
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything is synced and healthy: drop the old children
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          # non-fatal: leave the leftover device for manual cleanup
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3496 a8083063 Iustin Pop
3497 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    # tgt_node/oth_node were computed in CheckPrereq: the node whose
    # storage is replaced, and its drbd peer
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      # only the disks named in the opcode are replaced
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      # the peer must be consistent, otherwise wiping the target side
      # would destroy the only good copy of the data
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # 128 is the (hard-coded) drbd meta LV size in this code base
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption than logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      # update the config objects to match the on-node renames
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        # attach failed: try to remove the freshly-created LVs again
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            warning("Can't rollback device %s", "manually cleanup unused"
                    " logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      # NOTE(review): index 5 of the blockdev_find result is presumably
      # the is_degraded flag -- confirm against the rpc/backend code
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        if not rpc.call_blockdev_remove(tgt_node, lv):
          # non-fatal: the data is already on the new LVs
          warning("Can't remove old LV", "manually remove unused LVs")
          continue
3666 a9e0c397 Iustin Pop
3667 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3668 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3669 a9e0c397 Iustin Pop

3670 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3671 a9e0c397 Iustin Pop
      - for all disks of the instance:
3672 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3673 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3674 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3675 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3676 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3677 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3678 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3679 a9e0c397 Iustin Pop
          not network enabled
3680 a9e0c397 Iustin Pop
      - wait for sync across all devices
3681 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3682 a9e0c397 Iustin Pop

3683 a9e0c397 Iustin Pop
    Failures are not very well handled.
3684 0834c866 Iustin Pop

3685 a9e0c397 Iustin Pop
    """
3686 0834c866 Iustin Pop
    steps_total = 6
3687 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3688 a9e0c397 Iustin Pop
    instance = self.instance
3689 a9e0c397 Iustin Pop
    iv_names = {}
3690 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3691 a9e0c397 Iustin Pop
    # start of work
3692 a9e0c397 Iustin Pop
    cfg = self.cfg
3693 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3694 a9e0c397 Iustin Pop
    new_node = self.new_node
3695 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3696 0834c866 Iustin Pop
3697 0834c866 Iustin Pop
    # Step: check device activation
3698 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3699 0834c866 Iustin Pop
    info("checking volume groups")
3700 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3701 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3702 0834c866 Iustin Pop
    if not results:
3703 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3704 0834c866 Iustin Pop
    for node in pri_node, new_node:
3705 0834c866 Iustin Pop
      res = results.get(node, False)
3706 0834c866 Iustin Pop
      if not res or my_vg not in res:
3707 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3708 0834c866 Iustin Pop
                                 (my_vg, node))
3709 0834c866 Iustin Pop
    for dev in instance.disks:
3710 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3711 0834c866 Iustin Pop
        continue
3712 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3713 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3714 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3715 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3716 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3717 0834c866 Iustin Pop
3718 0834c866 Iustin Pop
    # Step: check other node consistency
3719 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3720 0834c866 Iustin Pop
    for dev in instance.disks:
3721 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3722 0834c866 Iustin Pop
        continue
3723 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3724 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3725 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3726 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3727 0834c866 Iustin Pop
                                 pri_node)
3728 0834c866 Iustin Pop
3729 0834c866 Iustin Pop
    # Step: create new storage
3730 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3731 a9e0c397 Iustin Pop
    for dev in instance.disks:
3732 a9e0c397 Iustin Pop
      size = dev.size
3733 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3734 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3735 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3736 a9e0c397 Iustin Pop
      # are talking about the secondary node
3737 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3738 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3739 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3740 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3741 a9e0c397 Iustin Pop
                                   " node '%s'" %
3742 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3743 a9e0c397 Iustin Pop
3744 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3745 0834c866 Iustin Pop
3746 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3747 0834c866 Iustin Pop
    for dev in instance.disks:
3748 0834c866 Iustin Pop
      size = dev.size
3749 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3750 a9e0c397 Iustin Pop
      # create new devices on new_node
3751 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3752 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3753 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3754 a9e0c397 Iustin Pop
                              children=dev.children)
3755 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3756 3f78eef2 Iustin Pop
                                        new_drbd, False,
3757 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3758 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3759 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3760 a9e0c397 Iustin Pop
3761 0834c866 Iustin Pop
    for dev in instance.disks:
3762 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3763 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3764 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3765 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3766 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3767 0834c866 Iustin Pop
                "Please cleanup this device manuall as soon as possible")
3768 a9e0c397 Iustin Pop
3769 a9e0c397 Iustin Pop
      # we have new storage, we 'rename' the network on the primary
3770 0834c866 Iustin Pop
      info("switching primary drbd for %s to new secondary node" % dev.iv_name)
3771 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3772 a9e0c397 Iustin Pop
      # rename to the ip of the new node
3773 a9e0c397 Iustin Pop
      new_uid = list(dev.physical_id)
3774 a9e0c397 Iustin Pop
      new_uid[2] = self.remote_node_info.secondary_ip
3775 a9e0c397 Iustin Pop
      rlist = [(dev, tuple(new_uid))]
3776 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_rename(pri_node, rlist):
3777 0834c866 Iustin Pop
        raise errors.OpExecError("Can't detach & re-attach drbd %s on node"
3778 a9e0c397 Iustin Pop
                                 " %s from %s to %s" %
3779 a9e0c397 Iustin Pop
                                 (dev.iv_name, pri_node, old_node, new_node))
3780 a9e0c397 Iustin Pop
      dev.logical_id = (pri_node, new_node, dev.logical_id[2])
3781 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3782 a9e0c397 Iustin Pop
      cfg.Update(instance)
3783 a9e0c397 Iustin Pop
3784 a9e0c397 Iustin Pop
3785 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3786 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3787 a9e0c397 Iustin Pop
    # return value
3788 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3789 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3790 a9e0c397 Iustin Pop
3791 a9e0c397 Iustin Pop
    # so check manually all the devices
3792 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3793 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3794 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3795 a9e0c397 Iustin Pop
      if is_degr:
3796 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3797 a9e0c397 Iustin Pop
3798 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3799 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3800 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3801 a9e0c397 Iustin Pop
      for lv in old_lvs:
3802 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3803 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3804 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3805 0834c866 Iustin Pop
                  "Cleanup stale volumes by hand")
3806 a9e0c397 Iustin Pop
3807 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    Selects and runs the handler matching the instance's disk template:
    remote RAID1, DRBD8 disk-only (no remote node given) or DRBD8
    secondary replacement (remote node given).

    """
    template = self.instance.disk_template
    if template == constants.DT_REMOTE_RAID1:
      handler = self._ExecRR1
    elif template == constants.DT_DRBD8:
      if self.op.remote_node is not None:
        handler = self._ExecD8Secondary
      else:
        handler = self._ExecD8DiskOnly
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return handler(feedback_fn)
3825 a8083063 Iustin Pop
3826 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns, for each wanted instance, a dict with its configured and
  remote (hypervisor-reported) state plus a recursive disk status tree.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    Raises:
      errors.OpPrereqError: if ``instances`` is not a list or contains
        an unknown instance name

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        # BUGFIX: this append was previously at loop level (outside the
        # for body), so only the *last* requested instance was collected
        # and all earlier names were silently dropped; it must run once
        # per validated name
        self.wanted_instances.append(instance)
    else:
      # no explicit list given: query all configured instances
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Builds a dict describing `dev` (ids, primary/secondary status as
    reported by blockdev_find) and recurses into its children.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      # remote_state reflects what the hypervisor on the primary node
      # reports; config_state is what the cluster configuration says
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      result[instance.name] = idict

    return result
3926 a8083063 Iustin Pop
3927 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
3928 a8083063 Iustin Pop
  """Modifies an instances's parameters.
3929 a8083063 Iustin Pop

3930 a8083063 Iustin Pop
  """
3931 a8083063 Iustin Pop
  HPATH = "instance-modify"
3932 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3933 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3934 a8083063 Iustin Pop
3935 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3936 a8083063 Iustin Pop
    """Build hooks env.
3937 a8083063 Iustin Pop

3938 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
3939 a8083063 Iustin Pop

3940 a8083063 Iustin Pop
    """
3941 396e1b78 Michael Hanselmann
    args = dict()
3942 a8083063 Iustin Pop
    if self.mem:
3943 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
3944 a8083063 Iustin Pop
    if self.vcpus:
3945 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
3946 396e1b78 Michael Hanselmann
    if self.do_ip or self.do_bridge:
3947 396e1b78 Michael Hanselmann
      if self.do_ip:
3948 396e1b78 Michael Hanselmann
        ip = self.ip
3949 396e1b78 Michael Hanselmann
      else:
3950 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
3951 396e1b78 Michael Hanselmann
      if self.bridge:
3952 396e1b78 Michael Hanselmann
        bridge = self.bridge
3953 396e1b78 Michael Hanselmann
      else:
3954 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
3955 396e1b78 Michael Hanselmann
      args['nics'] = [(ip, bridge)]
3956 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
3957 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3958 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3959 a8083063 Iustin Pop
    return env, nl, nl
3960 a8083063 Iustin Pop
3961 a8083063 Iustin Pop
  def CheckPrereq(self):
3962 a8083063 Iustin Pop
    """Check prerequisites.
3963 a8083063 Iustin Pop

3964 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
3965 a8083063 Iustin Pop

3966 a8083063 Iustin Pop
    """
3967 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
3968 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
3969 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
3970 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
3971 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3972 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
3973 a8083063 Iustin Pop
    if self.mem is not None:
3974 a8083063 Iustin Pop
      try:
3975 a8083063 Iustin Pop
        self.mem = int(self.mem)
3976 a8083063 Iustin Pop
      except ValueError, err:
3977 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
3978 a8083063 Iustin Pop
    if self.vcpus is not None:
3979 a8083063 Iustin Pop
      try:
3980 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
3981 a8083063 Iustin Pop
      except ValueError, err:
3982 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
3983 a8083063 Iustin Pop
    if self.ip is not None:
3984 a8083063 Iustin Pop
      self.do_ip = True
3985 a8083063 Iustin Pop
      if self.ip.lower() == "none":
3986 a8083063 Iustin Pop
        self.ip = None
3987 a8083063 Iustin Pop
      else:
3988 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
3989 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
3990 a8083063 Iustin Pop
    else:
3991 a8083063 Iustin Pop
      self.do_ip = False
3992 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
3993 a8083063 Iustin Pop
3994 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3995 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3996 a8083063 Iustin Pop
    if instance is None:
3997 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
3998 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3999 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4000 a8083063 Iustin Pop
    self.instance = instance
4001 a8083063 Iustin Pop
    return
4002 a8083063 Iustin Pop
4003 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4004 a8083063 Iustin Pop
    """Modifies an instance.
4005 a8083063 Iustin Pop

4006 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4007 a8083063 Iustin Pop
    """
4008 a8083063 Iustin Pop
    result = []
4009 a8083063 Iustin Pop
    instance = self.instance
4010 a8083063 Iustin Pop
    if self.mem:
4011 a8083063 Iustin Pop
      instance.memory = self.mem
4012 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4013 a8083063 Iustin Pop
    if self.vcpus:
4014 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4015 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4016 a8083063 Iustin Pop
    if self.do_ip:
4017 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4018 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4019 a8083063 Iustin Pop
    if self.bridge:
4020 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4021 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4022 a8083063 Iustin Pop
4023 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4024 a8083063 Iustin Pop
4025 a8083063 Iustin Pop
    return result
4026 a8083063 Iustin Pop
4027 a8083063 Iustin Pop
4028 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    node_names = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, node_names)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
4051 a8083063 Iustin Pop
4052 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    # resolve (possibly abbreviated) instance name against the config
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # canonicalize the target node name for the rest of the LU
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Flow: optionally shut the instance down, snapshot its 'sda' disk,
    restart it (in a finally, so a snapshot failure still restarts),
    then copy the snapshot to the target node, finalize the export
    there and prune older exports of the same instance elsewhere.
    Most failures are logged but deliberately not fatal (best-effort).

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    # LVM snapshot devices created below, to be exported then removed
    snap_disks = []

    try:
      # NOTE: only the disk named "sda" is snapshotted/exported; other
      # disks (presumably swap) are intentionally skipped
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # snapshot failure is logged, not raised: the export simply
            # proceeds with whatever snapshots succeeded
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if
      # we were the ones who shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      # the snapshot is removed regardless of whether the export of it
      # succeeded; it was only ever a temporary device
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    # remove older exports of this instance from all *other* nodes
    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4165 5c947f38 Iustin Pop
4166 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves ``self.op.kind``/``self.op.name`` into ``self.target``, the
    object (cluster, node or instance) whose tags are operated on.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4196 5c947f38 Iustin Pop
4197 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Return the tags of the target object resolved by CheckPrereq.

    """
    return self.target.GetTags()
4209 5c947f38 Iustin Pop
4210 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4211 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4212 73415719 Iustin Pop

4213 73415719 Iustin Pop
  """
4214 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4215 73415719 Iustin Pop
4216 73415719 Iustin Pop
  def CheckPrereq(self):
4217 73415719 Iustin Pop
    """Check prerequisites.
4218 73415719 Iustin Pop

4219 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4220 73415719 Iustin Pop

4221 73415719 Iustin Pop
    """
4222 73415719 Iustin Pop
    try:
4223 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4224 73415719 Iustin Pop
    except re.error, err:
4225 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4226 73415719 Iustin Pop
                                 (self.op.pattern, err))
4227 73415719 Iustin Pop
4228 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4229 73415719 Iustin Pop
    """Returns the tag list.
4230 73415719 Iustin Pop

4231 73415719 Iustin Pop
    """
4232 73415719 Iustin Pop
    cfg = self.cfg
4233 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4234 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4235 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4236 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4237 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4238 73415719 Iustin Pop
    results = []
4239 73415719 Iustin Pop
    for path, target in tgts:
4240 73415719 Iustin Pop
      for tag in target.GetTags():
4241 73415719 Iustin Pop
        if self.re.search(tag):
4242 73415719 Iustin Pop
          results.append((path, tag))
4243 73415719 Iustin Pop
    return results
4244 73415719 Iustin Pop
4245 73415719 Iustin Pop
4246 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4247 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4248 5c947f38 Iustin Pop

4249 5c947f38 Iustin Pop
  """
4250 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4251 5c947f38 Iustin Pop
4252 5c947f38 Iustin Pop
  def CheckPrereq(self):
4253 5c947f38 Iustin Pop
    """Check prerequisites.
4254 5c947f38 Iustin Pop

4255 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4256 5c947f38 Iustin Pop

4257 5c947f38 Iustin Pop
    """
4258 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4259 f27302fa Iustin Pop
    for tag in self.op.tags:
4260 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4261 5c947f38 Iustin Pop
4262 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4263 5c947f38 Iustin Pop
    """Sets the tag.
4264 5c947f38 Iustin Pop

4265 5c947f38 Iustin Pop
    """
4266 5c947f38 Iustin Pop
    try:
4267 f27302fa Iustin Pop
      for tag in self.op.tags:
4268 f27302fa Iustin Pop
        self.target.AddTag(tag)
4269 5c947f38 Iustin Pop
    except errors.TagError, err:
4270 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4271 5c947f38 Iustin Pop
    try:
4272 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4273 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4274 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4275 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4276 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4277 5c947f38 Iustin Pop
4278 5c947f38 Iustin Pop
4279 f27302fa Iustin Pop
class LUDelTags(TagsLU):
4280 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
4281 5c947f38 Iustin Pop

4282 5c947f38 Iustin Pop
  """
4283 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4284 5c947f38 Iustin Pop
4285 5c947f38 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for del_tag in self.op.tags:
      objects.TaggableObject.ValidateTag(del_tag)
    # every tag requested for deletion must currently exist on the target
    missing = frozenset(self.op.tags) - self.target.GetTags()
    if missing:
      missing_names = sorted(["'%s'" % del_tag for del_tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(missing_names)))
4303 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for del_tag in self.op.tags:
      self.target.RemoveTag(del_tag)
    # persist the change; a concurrent config modification aborts the op
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")