Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 30989e69

History | View | Annotate | Download (145.6 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  A logical unit implements one master-side operation.  Concrete
  subclasses must:
    - implement CheckPrereq, which must also fill in the opcode
      instance with all its fields (even if only as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine the run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    Validates that all parameters named in _OP_REQP are present on the
    opcode and enforces the REQ_CLUSTER/REQ_MASTER run requirements.
    Derived classes override this when they need extra opcode checks.

    """
    self.proc = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    # every required opcode parameter must have been filled in by the caller
    for required in self._OP_REQP:
      if getattr(op, required, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   required)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        # only the master node may run master-side code
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method must verify that the prerequisites for executing this
    LU are fulfilled.  It may talk to other nodes, but must remain
    idempotent: no cluster or system changes are allowed here.

    It must raise errors.OpPrereqError when something is not
    fulfilled; its return value is ignored.

    It must also canonicalize all opcode parameters, e.g. expand short
    node names to their full form, so that hooks, logging etc. work
    correctly afterwards.

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method implements the actual work.  It must raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    Returns a three-element tuple: a dict with the environment for the
    LU-specific hook, a list of node names on which the hook runs
    before execution, and a list of node names on which it runs after
    execution.

    The dict keys must not carry the 'GANETI_' prefix (the hooks
    runner adds it, together with additional keys).  An LU with no
    environment must return an empty dict, not None.  Likewise the
    node lists must be empty lists, not None; the master node is added
    automatically by the hooks runner whenever the LU requires a
    cluster, so it should not be included here.

    Not called at all when the LU class sets HPATH to None.

    """
    raise NotImplementedError
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  Parent class for LogicalUnits that need no hooks; collecting the
  no-op hook plumbing here avoids duplicating it in each subclass.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    No hooks are run for these LUs, so return the empty environment
    and empty pre/post node lists.

    """
    return {}, [], []
164 a8083063 Iustin Pop
165 a8083063 Iustin Pop
166 9440aeab Michael Hanselmann
def _AddHostToEtcHosts(hostname):
  """Wrapper around utils.SetEtcHostsEntry.

  Resolves the given host name and records its IP, full name and short
  name in the cluster /etc/hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  utils.SetEtcHostsEntry(constants.ETC_HOSTS, host_info.ip, host_info.name,
                         [host_info.ShortName()])
172 9440aeab Michael Hanselmann
173 9440aeab Michael Hanselmann
174 c8a0948f Michael Hanselmann
def _RemoveHostFromEtcHosts(hostname):
  """Wrapper around utils.RemoveEtcHostsEntry.

  Drops both the full and the short name of the given host from the
  cluster /etc/hosts file.

  """
  host_info = utils.HostInfo(name=hostname)
  for name in (host_info.name, host_info.ShortName()):
    utils.RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
181 c8a0948f Michael Hanselmann
182 c8a0948f Michael Hanselmann
183 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  The result is always nicely sorted; unknown names raise
  errors.OpPrereqError.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  # an empty/None list means "every node in the cluster"
  if not nodes:
    return utils.NiceSort(lu.cfg.GetNodeList())

  expanded = []
  for name in nodes:
    full_name = lu.cfg.ExpandNodeName(name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
205 3312b702 Iustin Pop
206 3312b702 Iustin Pop
207 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  The result is always nicely sorted; unknown names raise
  errors.OpPrereqError.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # an empty/None list means "every instance in the cluster"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for name in instances:
    full_name = lu.cfg.ExpandInstanceName(name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
229 dcb93971 Michael Hanselmann
230 dcb93971 Michael Hanselmann
231 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
232 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
233 83120a01 Michael Hanselmann

234 83120a01 Michael Hanselmann
  Args:
235 83120a01 Michael Hanselmann
    static: Static fields
236 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
237 83120a01 Michael Hanselmann

238 83120a01 Michael Hanselmann
  """
239 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
240 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
241 dcb93971 Michael Hanselmann
242 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
243 dcb93971 Michael Hanselmann
244 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
245 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
246 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
247 3ecf6786 Iustin Pop
                                          difference(all_fields)))
248 dcb93971 Michael Hanselmann
249 dcb93971 Michael Hanselmann
250 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
251 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
252 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
253 ecb215b5 Michael Hanselmann

254 ecb215b5 Michael Hanselmann
  Args:
255 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
256 396e1b78 Michael Hanselmann
  """
257 396e1b78 Michael Hanselmann
  env = {
258 0e137c28 Iustin Pop
    "OP_TARGET": name,
259 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
260 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
261 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
262 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
263 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
264 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
265 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
266 396e1b78 Michael Hanselmann
  }
267 396e1b78 Michael Hanselmann
268 396e1b78 Michael Hanselmann
  if nics:
269 396e1b78 Michael Hanselmann
    nic_count = len(nics)
270 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
271 396e1b78 Michael Hanselmann
      if ip is None:
272 396e1b78 Michael Hanselmann
        ip = ""
273 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
274 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
275 396e1b78 Michael Hanselmann
  else:
276 396e1b78 Michael Hanselmann
    nic_count = 0
277 396e1b78 Michael Hanselmann
278 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
279 396e1b78 Michael Hanselmann
280 396e1b78 Michael Hanselmann
  return env
281 396e1b78 Michael Hanselmann
282 396e1b78 Michael Hanselmann
283 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns the dict built by _BuildInstanceHookEnv.

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this previously read instance.os (copy/paste of the line
    # above), so the INSTANCE_STATUS hook variable reported the OS name
    # instead of the instance run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
303 396e1b78 Michael Hanselmann
304 396e1b78 Michael Hanselmann
305 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Rewrites (or appends to) the cluster-wide ssh known_hosts file so
  that exactly one entry maps fullnode/ip to pubkey.  Lines that match
  the host but carry a stale key or only part of the name/ip pair are
  dropped; in that case the whole file is rewritten atomically via a
  temp file and rename.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # open read/write if present, otherwise create the file
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  # True once an exact, complete entry for this host has been seen
  inthere = False

  save_lines = []  # lines to keep as-is
  add_lines = []   # new entry to add if none matched
  removed = False  # True when a stale/partial entry was dropped

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    # (fewer than 3 fields, or comment lines; the len() check also
    # short-circuits the [0] access for blank lines)
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      fields = parts[0].split(',')  # comma-separated host names/ips
      key = parts[2]

      # does this line mention all / any of (ip, fullnode)?
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # complete and correct entry: keep it, nothing to add later
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # partial match or wrong key: discard, forcing a full rewrite
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # (atomic replace: mkstemp in the same data dir, then rename)
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
382 a8083063 Iustin Pop
383 a8083063 Iustin Pop
384 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
385 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
386 a8083063 Iustin Pop

387 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
388 a8083063 Iustin Pop
  is the error message.
389 a8083063 Iustin Pop

390 a8083063 Iustin Pop
  """
391 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
392 a8083063 Iustin Pop
  if vgsize is None:
393 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
394 a8083063 Iustin Pop
  elif vgsize < 20480:
395 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
396 191a8385 Guido Trotter
            (vgname, vgsize))
397 a8083063 Iustin Pop
  return None
398 a8083063 Iustin Pop
399 a8083063 Iustin Pop
400 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  Backs up and removes any existing root keypair, generates a fresh
  dsa keypair and appends the new public key to root's authorized_keys
  file.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # keep a backup of any pre-existing key material before removing it
  for key_file in (priv_key, pub_key):
    if os.path.exists(key_file):
      utils.CreateBackup(key_file)
    utils.RemoveFile(key_file)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  pub_fd = open(pub_key, 'r')
  try:
    # 8 KiB is ample for a single public key line
    utils.AddAuthorizedKey(auth_keys, pub_fd.read(8192))
  finally:
    pub_fd.close()
430 a8083063 Iustin Pop
431 a8083063 Iustin Pop
432 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
433 a8083063 Iustin Pop
  """Setup the necessary configuration for the initial node daemon.
434 a8083063 Iustin Pop

435 a8083063 Iustin Pop
  This creates the nodepass file containing the shared password for
436 a8083063 Iustin Pop
  the cluster and also generates the SSL certificate.
437 a8083063 Iustin Pop

438 a8083063 Iustin Pop
  """
439 a8083063 Iustin Pop
  # Create pseudo random password
440 a8083063 Iustin Pop
  randpass = sha.new(os.urandom(64)).hexdigest()
441 a8083063 Iustin Pop
  # and write it into sstore
442 a8083063 Iustin Pop
  ss.SetKey(ss.SS_NODED_PASS, randpass)
443 a8083063 Iustin Pop
444 a8083063 Iustin Pop
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
445 a8083063 Iustin Pop
                         "-days", str(365*5), "-nodes", "-x509",
446 a8083063 Iustin Pop
                         "-keyout", constants.SSL_CERT_FILE,
447 a8083063 Iustin Pop
                         "-out", constants.SSL_CERT_FILE, "-batch"])
448 a8083063 Iustin Pop
  if result.failed:
449 3ecf6786 Iustin Pop
    raise errors.OpExecError("could not generate server ssl cert, command"
450 3ecf6786 Iustin Pop
                             " %s had exitcode %s and error message %s" %
451 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
452 a8083063 Iustin Pop
453 a8083063 Iustin Pop
  os.chmod(constants.SSL_CERT_FILE, 0400)
454 a8083063 Iustin Pop
455 a8083063 Iustin Pop
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
456 a8083063 Iustin Pop
457 a8083063 Iustin Pop
  if result.failed:
458 3ecf6786 Iustin Pop
    raise errors.OpExecError("Could not start the node daemon, command %s"
459 3ecf6786 Iustin Pop
                             " had exitcode %s and error %s" %
460 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
461 a8083063 Iustin Pop
462 a8083063 Iustin Pop
463 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the brigdes needed by an instance exist.

  Asks the instance's primary node (via rpc) whether every bridge used
  by the instance's NICs is present, raising errors.OpPrereqError
  otherwise.

  """
  # check bridges existance
  bridges = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, bridges):
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (bridges, instance.primary_node))
473 bf6929a2 Alexander Schreiber
474 bf6929a2 Alexander Schreiber
475 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Runs the one-time initialisation of a new cluster on the current
  host: validates the names, network setup and local environment in
  CheckPrereq, then creates the simple store, certificates, ssh setup
  and the initial configuration file in Exec.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  # no cluster exists yet, so the usual requirement must be disabled
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Also checks that no cluster exists yet, that this host's name and
    IP (and optional secondary IP) resolve sanely, and that the volume
    group, mac prefix, hypervisor type, master netdev and node init.d
    script are all usable.  Stores self.hostname, self.clustername and
    self.secondary_ip for Exec.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    self.hostname = hostname = utils.HostInfo()

    # a loopback address would make the node unreachable for peers
    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # the resolved IP must actually belong to this host
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    # the secondary IP (if distinct) must also be reachable locally
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    # the mac prefix must be three lowercase hex octets (aa:bb:cc)
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in constants.HYPER_TYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    # 'ip link show' fails if the master netdev does not exist
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

    if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
            os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
      raise errors.OpPrereqError("Init.d script '%s' missing or not"
                                 " executable." % constants.NODE_INITD_SCRIPT)

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Performs the actual initialisation, in order: simple store keys,
    node daemon password/certificate, master IP startup, ssh and
    /etc/hosts setup, and finally the cluster configuration file.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # second whitespace-separated field of the host key line is the key itself
    sshkey = sshline.split(" ")[1]

    _AddHostToEtcHosts(hostname.name)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
596 a8083063 Iustin Pop
597 a8083063 Iustin Pop
598 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_name = self.sstore.GetMasterNode()

    # the cluster is empty iff the only remaining node is the master
    node_names = self.cfg.GetNodeList()
    if node_names != [master_name]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(node_names) - 1))
    inst_names = self.cfg.GetInstanceList()
    if inst_names:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(inst_names))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_name = self.sstore.GetMasterNode()
    # keep a copy of the ssh keys before the node tears itself down
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_file in (priv_key, pub_key):
      utils.CreateBackup(key_file)
    rpc.call_node_leave_cluster(master_name)
632 a8083063 Iustin Pop
633 a8083063 Iustin Pop
634 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data as returned from the node (false-ish on
        RPC failure)
      node_result: results of the node's call_node_verify RPC
      remote_version: the node's reported version (false-ish if the
        node could not be contacted)
      feedback_fn: function used to report each problem found

    Returns:
      True if any check failed, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no contact at all: the remaining checks cannot run either
      # (note: two spaces before the dash, consistent with all other
      # error lines)
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a distinct loop variable so we don't shadow the 'node'
        # argument (the node being verified)
        for fail_node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (fail_node, node_result['nodelist'][fail_node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Args:
      instance: name of the instance to verify
      node_vol_is: per-node dictionary of the volumes actually present
      node_instance: per-node dictionary of the running instances
      feedback_fn: function used to report each problem found

    Returns:
      True if any check failed, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if instance not in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if instanceconfig.status != 'down':
      # a non-down instance must be running on its primary node
      if instance not in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns 0 if everything checked out, 1 otherwise (suitable as an
    exit code).

    """
    bad = False
    feedback_fn("* Verifying global settings")
    for msg in self.cfg.VerifyConfig():
      feedback_fn("  - ERROR: %s" % msg)

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all the remote data in one go per RPC, not per node
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if not isinstance(volumeinfo, dict):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if not isinstance(nodeinstance, list):
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
876 a8083063 Iustin Pop
877 a8083063 Iustin Pop
878 2c95a8d4 Iustin Pop
class LUVerifyDisks(NoHooksLU):
  """Verifies the cluster disks status.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    Returns a tuple (res_nodes, res_instances): the names of the nodes
    which could not be queried, and the names of the instances which
    have at least one logical volume offline.

    """
    # res_nodes and res_instances alias the members of the result tuple
    result = res_nodes, res_instances = [], []

    vg_name = self.cfg.GetVGName()
    nodes = utils.NiceSort(self.cfg.GetNodeList())
    instances = [self.cfg.GetInstanceInfo(name)
                 for name in self.cfg.GetInstanceList()]

    nv_dict = {}
    for inst in instances:
      inst_lvs = {}
      # only running, network-mirrored instances are of interest here
      if (inst.status != "up" or
          inst.disk_template not in constants.DTS_NET_MIRROR):
        continue
      inst.MapLVsByNode(inst_lvs)
      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
      for node, vol_list in inst_lvs.iteritems():
        for vol in vol_list:
          nv_dict[(node, vol)] = inst

    if not nv_dict:
      # nothing to check, skip the RPC round-trip entirely
      return result

    node_lvs = rpc.call_volume_list(nodes, vg_name)

    for node in nodes:
      # node_volume
      lvs = node_lvs[node]

      if not isinstance(lvs, dict):
        logger.Info("connection to node %s failed or invalid data returned" %
                    (node,))
        res_nodes.append(node)
        continue

      for lv_name, (_, lv_inactive, lv_online) in lvs.iteritems():
        if not lv_online:
          inst = nv_dict.get((node, lv_name), None)
          if inst is not None and inst.name not in res_instances:
            res_instances.append(inst.name)

    return result
938 2c95a8d4 Iustin Pop
939 2c95a8d4 Iustin Pop
940 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks run only on the master node, both pre and post.

    """
    env = {
      # the simple store lives on the LU (self.sstore), not on the
      # opcode; self.op.sstore would raise AttributeError
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves the new name and checks that either the name or the IP
    actually changes, and that the new IP is not already live.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # refuse to take over an IP address that is already in use
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restart the master IP, even if the rename failed
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,"
                     " please restart manually.")
1017 07bd8a51 Iustin Pop
1018 07bd8a51 Iustin Pop
1019 5bfac263 Iustin Pop
def _WaitForSync(cfgw, instance, proc, oneshot=False, unlock=False):
1020 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
1021 a8083063 Iustin Pop

1022 a8083063 Iustin Pop
  """
1023 a8083063 Iustin Pop
  if not instance.disks:
1024 a8083063 Iustin Pop
    return True
1025 a8083063 Iustin Pop
1026 a8083063 Iustin Pop
  if not oneshot:
1027 5bfac263 Iustin Pop
    proc.LogInfo("Waiting for instance %s to sync disks." % instance.name)
1028 a8083063 Iustin Pop
1029 a8083063 Iustin Pop
  node = instance.primary_node
1030 a8083063 Iustin Pop
1031 a8083063 Iustin Pop
  for dev in instance.disks:
1032 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
1033 a8083063 Iustin Pop
1034 a8083063 Iustin Pop
  retries = 0
1035 a8083063 Iustin Pop
  while True:
1036 a8083063 Iustin Pop
    max_time = 0
1037 a8083063 Iustin Pop
    done = True
1038 a8083063 Iustin Pop
    cumul_degraded = False
1039 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1040 a8083063 Iustin Pop
    if not rstats:
1041 5bfac263 Iustin Pop
      proc.LogWarning("Can't get any data from node %s" % node)
1042 a8083063 Iustin Pop
      retries += 1
1043 a8083063 Iustin Pop
      if retries >= 10:
1044 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1045 3ecf6786 Iustin Pop
                                 " aborting." % node)
1046 a8083063 Iustin Pop
      time.sleep(6)
1047 a8083063 Iustin Pop
      continue
1048 a8083063 Iustin Pop
    retries = 0
1049 a8083063 Iustin Pop
    for i in range(len(rstats)):
1050 a8083063 Iustin Pop
      mstat = rstats[i]
1051 a8083063 Iustin Pop
      if mstat is None:
1052 5bfac263 Iustin Pop
        proc.LogWarning("Can't compute data for node %s/%s" %
1053 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1054 a8083063 Iustin Pop
        continue
1055 0834c866 Iustin Pop
      # we ignore the ldisk parameter
1056 0834c866 Iustin Pop
      perc_done, est_time, is_degraded, _ = mstat
1057 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1058 a8083063 Iustin Pop
      if perc_done is not None:
1059 a8083063 Iustin Pop
        done = False
1060 a8083063 Iustin Pop
        if est_time is not None:
1061 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1062 a8083063 Iustin Pop
          max_time = est_time
1063 a8083063 Iustin Pop
        else:
1064 a8083063 Iustin Pop
          rem_time = "no time estimate"
1065 5bfac263 Iustin Pop
        proc.LogInfo("- device %s: %5.2f%% done, %s" %
1066 5bfac263 Iustin Pop
                     (instance.disks[i].iv_name, perc_done, rem_time))
1067 a8083063 Iustin Pop
    if done or oneshot:
1068 a8083063 Iustin Pop
      break
1069 a8083063 Iustin Pop
1070 a8083063 Iustin Pop
    if unlock:
1071 a8083063 Iustin Pop
      utils.Unlock('cmd')
1072 a8083063 Iustin Pop
    try:
1073 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1074 a8083063 Iustin Pop
    finally:
1075 a8083063 Iustin Pop
      if unlock:
1076 a8083063 Iustin Pop
        utils.Lock('cmd')
1077 a8083063 Iustin Pop
1078 a8083063 Iustin Pop
  if done:
1079 5bfac263 Iustin Pop
    proc.LogInfo("Instance %s's disks are in sync." % instance.name)
1080 a8083063 Iustin Pop
  return not cumul_degraded
1081 a8083063 Iustin Pop
1082 a8083063 Iustin Pop
1083 0834c866 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary, ldisk=False):
1084 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1085 a8083063 Iustin Pop

1086 0834c866 Iustin Pop
  The ldisk parameter, if True, will change the test from the
1087 0834c866 Iustin Pop
  is_degraded attribute (which represents overall non-ok status for
1088 0834c866 Iustin Pop
  the device(s)) to the ldisk (representing the local storage status).
1089 0834c866 Iustin Pop

1090 a8083063 Iustin Pop
  """
1091 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1092 0834c866 Iustin Pop
  if ldisk:
1093 0834c866 Iustin Pop
    idx = 6
1094 0834c866 Iustin Pop
  else:
1095 0834c866 Iustin Pop
    idx = 5
1096 a8083063 Iustin Pop
1097 a8083063 Iustin Pop
  result = True
1098 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1099 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1100 a8083063 Iustin Pop
    if not rstats:
1101 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
1102 a8083063 Iustin Pop
      result = False
1103 a8083063 Iustin Pop
    else:
1104 0834c866 Iustin Pop
      result = result and (not rstats[idx])
1105 a8083063 Iustin Pop
  if dev.children:
1106 a8083063 Iustin Pop
    for child in dev.children:
1107 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1108 a8083063 Iustin Pop
1109 a8083063 Iustin Pop
  return result
1110 a8083063 Iustin Pop
1111 a8083063 Iustin Pop
1112 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    # query every known node for the OSes it can see
    all_nodes = self.cfg.GetNodeList()
    diagnose_data = rpc.call_os_diagnose(all_nodes)
    if diagnose_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return diagnose_data
1135 a8083063 Iustin Pop
1136 a8083063 Iustin Pop
1137 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # call-style raise, consistent with every other raise in this
      # module (the 'raise Class, arg' form is deprecated)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    # stop the node daemon on the node itself
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)

    _RemoveHostFromEtcHosts(node.name)
1210 c8a0948f Michael Hanselmann
1211 a8083063 Iustin Pop
1212 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  Returns one row per requested node, each row holding the values of
  the requested output fields in order.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    Raises:
      errors.OpPrereqError: via _CheckOutputFields for unknown fields,
        or via _GetWantedNodes for unknown node names.

    """
    # Fields that must be fetched live from the nodes via RPC; all
    # other (static) fields come from the configuration.
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # Resolve op.names into the final list of node names to query.
    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    # Only issue the node_info RPC if a dynamic field was requested.
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
        else:
          # RPC failed for this node; its dynamic fields become None.
          live_data[name] = {}
    else:
      # NOTE(review): dict.fromkeys makes every key share the *same*
      # empty dict object; harmless here because live_data is only
      # read below, but worth keeping in mind.
      live_data = dict.fromkeys(nodenames, {})

    # Map node name -> set of instances having it as primary
    # (respectively secondary) node.
    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    # Only walk the instance list if an instance field was requested.
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    # Build one output row per node, in the requested field order.
    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          # Missing live data (RPC failure) yields None for the field.
          val = live_data[node.name].get(field, None)
        else:
          # Should be unreachable: _CheckOutputFields validated fields.
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output
1308 a8083063 Iustin Pop
1309 a8083063 Iustin Pop
1310 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  Returns one row per logical volume on the queried nodes, each row
  holding the requested output fields as strings.

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # Resolve op.nodes into the list of node names to query.
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    # Fetch every node's volume list via RPC.
    volumes = rpc.call_node_volumes(nodenames)

    # All instances, needed to map each LV back to its owning instance.
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # instance object -> {node: [lv names]} mapping.
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # Skip nodes for which the RPC failed or returned no volumes.
      if node not in volumes or not volumes[node]:
        continue

      # Sort a copy of the node's volumes by physical device name.
      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # Find the instance owning this LV on this node; the
            # for/else yields '-' when no instance matches.
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1378 dcb93971 Michael Hanselmann
1379 dcb93971 Michael Hanselmann
1380 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # Pre-hooks run on the current members; post-hooks also include
    # the node being added.
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # Resolve the name; also canonicalises it and gives us the IP.
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    # The secondary IP defaults to the primary (single-homed node).
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # Refuse IPs already used by any existing node, in either role.
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachability of the new node's primary IP from here
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
                                   " based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Bootstraps the node daemon over ssh, copies the ssh host/user keys,
    updates /etc/hosts and known_hosts cluster-wide, distributes the
    simple-store files and finally records the node in the config.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # The password is interpolated into a shell command below, so be
    # strict about its alphabet.
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity; give the freshly restarted daemon time to
    # come up before querying it
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # Host DSA/RSA key pairs plus the cluster user's key pair; the
    # order must match the call_node_add signature below.
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _AddHostToEtcHosts(new_node.name)

    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    # For dual-homed nodes, verify the node can actually bind its
    # claimed secondary IP.
    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
                                 " you gave (%s). Please fix and re-run this"
                                 " command." % new_node.secondary_ip)

    # The node's own idea of its hostname must match what DNS told us.
    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s."
                               " Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # Best effort: log and continue with the remaining nodes.
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # Push the simple-store files to the new node only.
    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1598 a8083063 Iustin Pop
1599 a8083063 Iustin Pop
1600 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  # Must run on a non-master node, so do not require the master role.
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    Raises:
      errors.OpPrereqError: if run on the current master node.

    """
    # The local host becomes the new master.
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be."
                                 " %s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    if not rpc.call_node_stop_master(self.old_master):
      # NOTE(review): message probably meant "could not disable";
      # runtime string left untouched here.
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # Record the new master in the simple store and push that file to
    # every node so they all agree on who the master is.
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    # Activate the master role (and IP) on the new master.
    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,"
                  " please fix manually.")
1668 a8083063 Iustin Pop
1669 a8083063 Iustin Pop
1670 a8083063 Iustin Pop
1671 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  Returns a dict with the cluster name, the software/protocol version
  constants, the master node name and the platform architecture.

  """
  _OP_REQP = []
  # This query works even when the master role is not active locally.
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    sstore = self.sstore
    arch_info = (platform.architecture()[0], platform.machine())

    return {
      "name": sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": sstore.GetMasterNode(),
      "architecture": arch_info,
      }
1700 a8083063 Iustin Pop
1701 a8083063 Iustin Pop
1702 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the named file exists on the master and resolves the
    given node list; either failure raises errors.OpPrereqError.

    """
    filename = self.op.filename
    if not os.path.exists(filename):
      raise errors.OpPrereqError("No such filename '%s'" % filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Copies op.filename via ssh to every selected node except the local
    machine; copy failures are logged but do not abort the operation.

    """
    fname = self.op.filename
    local_name = utils.HostInfo().name

    remote_nodes = [name for name in self.nodes if name != local_name]
    for target in remote_nodes:
      if not ssh.CopyFileToNode(target, fname):
        logger.Error("Copy of file %s to node %s failed" % (fname, target))
1739 a8083063 Iustin Pop
1740 a8083063 Iustin Pop
1741 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return the cluster configuration in text form.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites for this LU.

    """

  def Exec(self, feedback_fn):
    """Return the serialized cluster configuration.

    """
    return self.cfg.DumpConfig()
1758 a8083063 Iustin Pop
1759 a8083063 Iustin Pop
1760 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    Expands and validates the requested node list.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run the command on each node in turn.

    Returns a list of (node, output, exit_code) tuples, one entry per
    target node.

    """
    results = []
    for node_name in self.nodes:
      ssh_result = ssh.SSHCall(node_name, "root", self.op.command)
      results.append((node_name, ssh_result.output, ssh_result.exit_code))

    return results
1784 a8083063 Iustin Pop
1785 a8083063 Iustin Pop
1786 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Assemble the instance's block devices on all its nodes.

    Returns the device mapping info produced by the assembly helper.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
1815 a8083063 Iustin Pop
1816 a8083063 Iustin Pop
1817 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration object (used to set disk IDs per node)
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a (disks_ok, device_info) tuple, where disks_ok is False if the
    operation failed and device_info is a list of
    (primary_node, instance_visible_name, assemble_result) tuples, one
    per instance disk, mapping node devices to instance devices

  """
  device_info = []
  disks_ok = True
  for inst_disk in instance.disks:
    # result of the assemble call on the primary node, kept for the
    # device_info entry of this disk
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk,
                                          instance.name, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=%s)" %
                     (inst_disk.iv_name, node, is_primary))
        # secondary-node failures are only fatal unless explicitly ignored
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1859 a8083063 Iustin Pop
1860 a8083063 Iustin Pop
1861 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Assemble an instance's disks, raising OpExecError on failure.

  On failure the devices are shut down again before raising; when force
  is a false value other than None, a hint about retrying with
  '--force' is logged as well.

  """
  disks_ok, _ = _AssembleInstanceDisks(instance, cfg,
                                       ignore_secondaries=force)
  if disks_ok:
    return
  # roll back any partially-assembled devices before failing
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1873 fe7b0351 Michael Hanselmann
1874 fe7b0351 Michael Hanselmann
1875 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    This refuses to run if the instance is still listed as running on
    its primary node, since deactivating the disks of a live instance
    would corrupt it.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list answer means the RPC to the node failed; use isinstance
    # instead of the exact-type test `type(x) is list`
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1910 a8083063 Iustin Pop
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored:
  only shutdown failures on other nodes make the function return
  False.  (The original docstring had this condition inverted.)

  Returns:
    True if every non-ignored device shutdown succeeded, False
    otherwise; failures are also logged per device.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # a primary-node failure only counts when not explicitly ignored
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1931 a8083063 Iustin Pop
1932 a8083063 Iustin Pop
1933 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    network bridges exist on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    # normalize the opcode name to the expanded instance name
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    Checks free memory on the primary node, assembles the disks, and
    starts the instance via RPC; on a failed start the disks are shut
    down again before raising.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                freememory))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk assembly on a failed start
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
2004 a8083063 Iustin Pop
2005 a8083063 Iustin Pop
2006 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    network bridges exist on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    # normalize the opcode name to the expanded instance name
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft/hard reboots are delegated to the hypervisor via one RPC; a
    full reboot shuts the instance and its disks down and starts them
    again.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # NOTE(review): reboot_type is validated here instead of in
    # CheckPrereq, so an invalid type is only caught at execution time
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                           constants.INSTANCE_REBOOT_HARD,
                           constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        # roll back the disk assembly on a failed start
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2080 bf6929a2 Alexander Schreiber
2081 bf6929a2 Alexander Schreiber
2082 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    Even when the hypervisor-level shutdown RPC fails, the instance is
    still marked down in the configuration and its disks deactivated.

    """
    instance = self.instance
    stopped = rpc.call_instance_shutdown(instance.primary_node, instance)
    if not stopped:
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2125 a8083063 Iustin Pop
2126 a8083063 Iustin Pop
2127 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and (when an OS change is requested) that the new OS is supported
    on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # bugfix: this previously read self.op.pnode, which does not
        # exist on the reinstall opcode and raised AttributeError
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally switches the instance's OS, then runs the OS create
    scripts on the primary node with the disks activated only for the
    duration of the install.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s"
                                 " on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even if the install failed
      _ShutdownInstanceDisks(inst, self.cfg)
      _ShutdownInstanceDisks(inst, self.cfg)
2204 fe7b0351 Michael Hanselmann
2205 fe7b0351 Michael Hanselmann
2206 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    that the new name resolves, and (unless ignore_ip is set) that the
    new name's IP is not already in use.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    if not getattr(self.op, "ignore_ip", False):
      # an answering ping means the new IP is already taken
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    The configuration is updated first, then the OS rename script is
    run on the primary node with the disks activated only for its
    duration; a script failure is logged but the rename stands.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # bugfix: message previously read "Could run ..." (missing "not")
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2281 decd5f45 Iustin Pop
2282 decd5f45 Iustin Pop
2283 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # Exec reads self.op.ignore_failures unconditionally, so it must be a
  # required opcode parameter (otherwise an opcode missing it would only
  # fail with AttributeError at execution time)
  _OP_REQP = ["instance_name", "ignore_failures"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks and finally removes it
    from the configuration; with ignore_failures set, shutdown/disk
    removal errors are only warnings.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2340 a8083063 Iustin Pop
2341 a8083063 Iustin Pop
2342 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    static_fields = ["name", "os", "pnode", "snodes",
                     "admin_state", "admin_ram",
                     "disk_template", "ip", "mac", "bridge",
                     "sda_size", "sdb_size"]
    _CheckOutputFields(static=static_fields,
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    inames = self.wanted
    ilist = [self.cfg.GetInstanceInfo(name) for name in inames]

    # gather live data only when a dynamic field was actually requested
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      pnodes = frozenset([inst.primary_node for inst in ilist])
      live_data = {}
      node_data = rpc.call_all_instances_info(pnodes)
      for nname in pnodes:
        nresult = node_data[nname]
        if nresult:
          live_data.update(nresult)
        elif nresult == False:
          bad_nodes.append(nname)
        # otherwise no instance is alive on that node
    else:
      live_data = dict([(name, {}) for name in inames])

    output = []
    for inst in ilist:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = inst.name
        elif field == "os":
          val = inst.os
        elif field == "pnode":
          val = inst.primary_node
        elif field == "snodes":
          val = list(inst.secondary_nodes)
        elif field == "admin_state":
          val = (inst.status != "down")
        elif field == "oper_state":
          # None means "unknown" (the primary node did not answer)
          if inst.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(inst.name))
        elif field == "admin_ram":
          val = inst.memory
        elif field == "oper_ram":
          if inst.primary_node in bad_nodes:
            val = None
          elif inst.name in live_data:
            val = live_data[inst.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = inst.disk_template
        elif field == "ip":
          val = inst.nics[0].ip
        elif field == "bridge":
          val = inst.nics[0].bridge
        elif field == "mac":
          val = inst.nics[0].mac
        elif field in ("sda_size", "sdb_size"):
          # the field name prefix ("sda"/"sdb") selects the disk
          disk = inst.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
2440 a8083063 Iustin Pop
2441 a8083063 Iustin Pop
2442 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a network
    mirrored disk template, and that the target (secondary) node has
    enough free memory and the required bridges.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    # check memory requirements on the secondary node
    target_node = secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      # name the node in the message, not the raw RPC result dict
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % target_node)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    # re-check memory: the situation may have changed since CheckPrereq
    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2577 a8083063 Iustin Pop
2578 a8083063 Iustin Pop
2579 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  """
  # create the whole subtree first, depth-first
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not new_id:
    return False
  # remember the physical id the node assigned, if we didn't have one
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2598 a8083063 Iustin Pop
2599 a8083063 Iustin Pop
2600 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  if device.CreateOnSecondary():
    force = True

  # recurse into the children first, propagating the (possibly raised)
  # force flag
  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # this device type itself need not exist on the secondary
    return True

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not new_id:
    return False
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2627 a8083063 Iustin Pop
2628 a8083063 Iustin Pop
2629 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
  """Generate a suitable LV name.

  This will generate a logical volume name for the given instance.

  """
  # one fresh unique id per requested extension
  return ["%s%s" % (cfg.GenerateUniqueID(), ext) for ext in exts]
2640 923b1523 Iustin Pop
2641 923b1523 Iustin Pop
2642 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd device complete with its children.

  """
  drbd_port = cfg.AllocatePort()
  vg = cfg.GetVGName()
  # data LV sized as requested, plus a fixed 128MB metadata LV
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vg, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_dev, meta_dev])
2656 a8083063 Iustin Pop
2657 a8083063 Iustin Pop
2658 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  """
  drbd_port = cfg.AllocatePort()
  vg = cfg.GetVGName()
  # data LV sized as requested, plus a fixed 128MB metadata LV
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vg, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
2673 a1f445d3 Iustin Pop
2674 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of top-level Disk objects (sda and sdb) for the
  requested template; raises ProgrammerError for an unknown template
  name or a secondary-node count that does not match the template.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  n_secondaries = len(secondary_nodes)
  if template_name == "diskless":
    disks = []
  elif template_name == "plain":
    if n_secondaries != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    lv_names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    disks = [objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                          logical_id=(vgname, lv_names[0]),
                          iv_name="sda"),
             objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                          logical_id=(vgname, lv_names[1]),
                          iv_name="sdb")]
  elif template_name == "local_raid1":
    if n_secondaries != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # two mirrored LVs per visible device, joined by an md raid1
    lv_names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                          ".sdb_m1", ".sdb_m2"])
    sda_mirrors = [objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                                logical_id=(vgname, lv_names[0])),
                   objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                                logical_id=(vgname, lv_names[1]))]
    md_sda = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                          size=disk_sz, children=sda_mirrors)
    sdb_mirrors = [objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                                logical_id=(vgname, lv_names[2])),
                   objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                                logical_id=(vgname, lv_names[3]))]
    md_sdb = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                          size=swap_sz, children=sdb_mirrors)
    disks = [md_sda, md_sdb]
  elif template_name == constants.DT_REMOTE_RAID1:
    if n_secondaries != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    lv_names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                          ".sdb_data", ".sdb_meta"])
    # md raid1 on top of a drbd7 branch towards the remote node
    md_sda = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                          size=disk_sz,
                          children=[_GenerateMDDRBDBranch(cfg, primary_node,
                                                          remote_node, disk_sz,
                                                          lv_names[0:2])])
    md_sdb = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                          size=swap_sz,
                          children=[_GenerateMDDRBDBranch(cfg, primary_node,
                                                          remote_node, swap_sz,
                                                          lv_names[2:4])])
    disks = [md_sda, md_sdb]
  elif template_name == constants.DT_DRBD8:
    if n_secondaries != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    lv_names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                          ".sdb_data", ".sdb_meta"])
    disks = [_GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                  disk_sz, lv_names[0:2], "sda"),
             _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                  swap_sz, lv_names[2:4], "sdb")]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2748 a8083063 Iustin Pop
2749 a8083063 Iustin Pop
2750 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  # tag the disk with the name of the owning instance
  return "originstname+" + instance.name
2755 a0c3fea1 Michael Hanselmann
2756 a0c3fea1 Michael Hanselmann
2757 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    # create on the secondaries first; abort on the first failure
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, snode, instance,
                                        disk, False, info):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                    instance, disk, info):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
2788 a8083063 Iustin Pop
2789 a8083063 Iustin Pop
2790 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for top_disk in instance.disks:
    # walk the whole device tree, on every node it lives on
    for node, disk in top_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        # best effort: record the failure but keep removing the rest
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (top_disk.iv_name, node))
        all_removed = False

  return all_removed
2817 a8083063 Iustin Pop
2818 a8083063 Iustin Pop
2819 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Supports two modes (``self.op.mode``): plain creation
  (INSTANCE_CREATE, runs the OS create scripts) and import from a
  previously-made export (INSTANCE_IMPORT, restores a dumped disk
  image from ``src_node``/``src_path``).

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import-only parameters; self.src_image was computed in CheckPrereq
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the creation mode, the export (for imports), the primary
    and secondary nodes, free disk space, OS availability, hostname/IP
    uniqueness and the target bridge.  Raises errors.OpPrereqError on
    any user-fixable problem.

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        # FIX: the message used to interpolate the whole nodeinfo dict
        # instead of the name of the node that failed to answer
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # ip can be None (no address), "auto" (use the resolved address)
    # or an explicit dotted-quad
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # the address must not answer yet, otherwise it's already in use
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    optionally waits for the disks to sync, runs the OS create/import
    scripts and finally starts the instance if requested.  On disk
    creation or sync failure the already-created disks are removed
    again before raising.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    network_port = None  # placeholder assignment for later

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            network_port=network_port,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any disks that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, self.proc, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: undo both the disks and the config entry
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3104 a8083063 Iustin Pop
3105 a8083063 Iustin Pop
3106 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    inst = self.cfg.GetInstanceInfo(full_name)
    if inst is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = inst

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    inst = self.instance
    pnode = inst.primary_node

    # the instance must actually be running on its primary node
    running = rpc.call_instance_list([pnode])[pnode]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % pnode)

    if inst.name not in running:
      raise errors.OpExecError("Instance %s is not running." % inst.name)

    logger.Debug("connecting to console of %s on %s" % (inst.name, pnode))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(inst)
    # build ssh cmdline
    argv = ["ssh", "-q", "-t"] + list(ssh.KNOWN_HOSTS_OPTS)
    argv += list(ssh.BATCH_MODE_OPTS)
    argv += [pnode, console_cmd]
    return "ssh", argv
3154 a8083063 Iustin Pop
3155 a8083063 Iustin Pop
3156 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Only valid for remote_raid1 instances: a new DRBD device is created
  between the primary node and ``remote_node`` and attached as a child
  of the md device identified by ``disk_name``.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    node exists and is not the primary, that the disk template is
    remote_raid1, that the named disk exists and that it does not
    already have two children.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # for/else: the loop leaves `disk` bound to the matching device;
    # the else branch fires only if no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave devices."
                                 " This would create a 3-disk raid1 which we"
                                 " don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component

    Creates the new DRBD branch on the secondary first, then on the
    primary, then attaches it to the md device.  Each step rolls back
    the previously-created devices on failure.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    # allocate unique LV names for the data and meta volumes of the branch
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      # nothing was created yet, so nothing to roll back
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # attach failed: remove the new device from both nodes, but keep
      # going through the rollback even if one removal fails
      logger.Error("Can't add mirror compoment to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    # success: record the new child in the configuration
    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance, self.proc)

    return 0
3268 a8083063 Iustin Pop
3269 a8083063 Iustin Pop
3270 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  The child to detach is identified by the md device name
  (``disk_name``) plus ``disk_id``, which is matched against the third
  element of the child's logical_id.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its layout is
    remote_raid1, that the named disk and the requested child exist,
    and that removing the child would not leave the mirror empty.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # for/else: `disk` stays bound to the matching device; the else
    # branch fires only if no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # same for/else idiom to locate the DRBD child with the given id
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0] and [1] are the two nodes of the DRBD pair; pick
    # whichever one is NOT the primary as the old secondary
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    Detaches the child from the md device on the primary node, then
    best-effort removes the underlying device from both of its nodes
    and updates the configuration.

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # remove the detached device from both nodes of the DRBD pair;
    # failures here are logged but do not abort the operation
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)
3355 a8083063 Iustin Pop
3356 a8083063 Iustin Pop
3357 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  Handles the network-mirrored templates: remote_raid1 (full disk
  replacement) and drbd8 (primary- or secondary-side replacement).

  """
  # directory name used for the hooks of this LU
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present
  _OP_REQP = ["instance_name", "mode", "disks"]
3364 a8083063 Iustin Pop
3365 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    # instance-derived variables are merged in on top
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    if self.op.remote_node is not None:
      node_list.append(self.op.remote_node)
    # same node list for pre- and post-hooks
    return env, node_list, node_list
3384 a8083063 Iustin Pop
3385 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that its disk
    template is network mirrored, and validates the requested
    replacement mode against the template and the (optional) new
    secondary node.

    """
    cfg = self.cfg
    instance = cfg.GetInstanceInfo(
      cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    self.op.instance_name = instance.name

    # only net-mirrored templates can have their disks replaced here
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is None:
      self.remote_node_info = None
    else:
      remote_node = cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = cfg.GetNodeInfo(remote_node)
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      if self.op.mode == constants.REPLACE_DISK_SEC:
        # DRBD8 cannot replace the secondary with itself (no different
        # port would be allocated), unlike drbd7
        raise errors.OpPrereqError("Same secondary given, cannot execute"
                                   " replacement")
      # the current secondary was given explicitly: fall back to the
      # drbd7 'no-replace-secondary' behaviour
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if (self.op.mode == constants.REPLACE_DISK_ALL and
          remote_node is not None):
        # replacing everything onto a new node means replacing the
        # secondary, so switch modes
        self.op.mode = constants.REPLACE_DISK_SEC

      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        self.tgt_node = instance.primary_node
        self.oth_node = instance.secondary_nodes[0]
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        # a None new_node means "keep the current secondary"
        self.new_node = remote_node
        self.tgt_node = instance.secondary_nodes[0]
        self.oth_node = instance.primary_node
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    self.op.remote_node = remote_node
3465 a8083063 Iustin Pop
3466 a9e0c397 Iustin Pop
  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance (remote_raid1 template).

    For every disk a fresh drbd branch is created on the target
    secondary, attached to the md device on the primary, synced, and
    only then is the old branch detached and its devices removed.

    """
    instance = self.instance
    cfg = self.cfg
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on secondary"
                                 " node %s. Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # roll back the device just created on the secondary
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!"
                                 " Full abort, cleanup manually!!")

      # both halves of the branch exist now; attach it to the md on
      # the primary node
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        logger.Error("Can't add mirror compoment to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # instead, check each device individually
    for name, (dev, old_child, new_drbd) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      if rpc.call_blockdev_find(instance.primary_node, dev)[5]:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      if rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything is in sync: drop the old branches
    for name, (dev, old_child, new_drbd) in iv_names.iteritems():
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [old_child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in old_child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(old_child, node)
        if not rpc.call_blockdev_remove(node, old_child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(old_child)

      cfg.AddInstance(instance)
3560 a8083063 Iustin Pop
3561 a9e0c397 Iustin Pop
  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    oth_node = self.oth_node

    # Step: check device activation
    self.proc.LogStep(1, steps_total, "check device existence")
    info("checking volume groups")
    my_vg = cfg.GetVGName()
    results = rpc.call_vg_list([oth_node, tgt_node])
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")
    for node in oth_node, tgt_node:
      res = results.get(node, False)
      if not res or my_vg not in res:
        raise errors.OpExecError("Volume group '%s' not found on %s" %
                                 (my_vg, node))
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      for node in tgt_node, oth_node:
        info("checking %s on %s" % (dev.iv_name, node))
        cfg.SetDiskID(dev, node)
        if not rpc.call_blockdev_find(node, dev):
          raise errors.OpExecError("Can't find device %s on node %s" %
                                   (dev.iv_name, node))

    # Step: check other node consistency
    self.proc.LogStep(2, steps_total, "check peer consistency")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      info("checking %s consistency on %s" % (dev.iv_name, oth_node))
      if not _CheckDiskConsistency(self.cfg, dev, oth_node,
                                   oth_node==instance.primary_node):
        raise errors.OpExecError("Peer node (%s) has degraded storage, unsafe"
                                 " to replace disks on this node (%s)" %
                                 (oth_node, tgt_node))

    # Step: create new storage
    self.proc.LogStep(3, steps_total, "allocate new storage")
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      # NOTE(review): the meta LV size (128) is hard-coded here
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      info("creating new local storage on %s for %s" %
           (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

    # Step: for each lv, detach+rename*2+attach
    self.proc.LogStep(4, steps_total, "change drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      info("detaching %s drbd from local storage" % dev.iv_name)
      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption than logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      # build the rename list based on what LVs exist on the node
      rlist = []
      for to_ren in old_lvs:
        find_res = rpc.call_blockdev_find(tgt_node, to_ren)
        if find_res is not None: # device exists
          rlist.append((to_ren, ren_fn(to_ren, temp_suffix)))

      info("renaming the old LVs on the target node")
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename old LVs on node %s" % tgt_node)
      # now we rename the new LVs to the old LVs
      info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        raise errors.OpExecError("Can't rename new LVs on node %s" % tgt_node)

      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        cfg.SetDiskID(new, tgt_node)

      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # FIX: the original passed the literal "%s" to the log -
            # substitute the LV name so the message identifies the device
            warning("Can't rollback device %s" % new_lv.logical_id[1],
                    hint="manually cleanup unused logical volumes")
        raise errors.OpExecError("Can't add local storage to drbd")

      dev.children = new_lvs
      cfg.Update(instance)

    # Step: wait for sync

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    self.proc.LogStep(5, steps_total, "sync devices")
    _WaitForSync(cfg, instance, self.proc, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # Step: remove old storage
    self.proc.LogStep(6, steps_total, "removing old storage")
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        if not rpc.call_blockdev_remove(tgt_node, lv):
          warning("Can't remove old LV", hint="manually remove unused LVs")
          continue
3730 a9e0c397 Iustin Pop
3731 a9e0c397 Iustin Pop
  def _ExecD8Secondary(self, feedback_fn):
3732 a9e0c397 Iustin Pop
    """Replace the secondary node for drbd8.
3733 a9e0c397 Iustin Pop

3734 a9e0c397 Iustin Pop
    The algorithm for replace is quite complicated:
3735 a9e0c397 Iustin Pop
      - for all disks of the instance:
3736 a9e0c397 Iustin Pop
        - create new LVs on the new node with same names
3737 a9e0c397 Iustin Pop
        - shutdown the drbd device on the old secondary
3738 a9e0c397 Iustin Pop
        - disconnect the drbd network on the primary
3739 a9e0c397 Iustin Pop
        - create the drbd device on the new secondary
3740 a9e0c397 Iustin Pop
        - network attach the drbd on the primary, using an artifice:
3741 a9e0c397 Iustin Pop
          the drbd code for Attach() will connect to the network if it
3742 a9e0c397 Iustin Pop
          finds a device which is connected to the good local disks but
3743 a9e0c397 Iustin Pop
          not network enabled
3744 a9e0c397 Iustin Pop
      - wait for sync across all devices
3745 a9e0c397 Iustin Pop
      - remove all disks from the old secondary
3746 a9e0c397 Iustin Pop

3747 a9e0c397 Iustin Pop
    Failures are not very well handled.
3748 0834c866 Iustin Pop

3749 a9e0c397 Iustin Pop
    """
3750 0834c866 Iustin Pop
    steps_total = 6
3751 5bfac263 Iustin Pop
    warning, info = (self.proc.LogWarning, self.proc.LogInfo)
3752 a9e0c397 Iustin Pop
    instance = self.instance
3753 a9e0c397 Iustin Pop
    iv_names = {}
3754 a9e0c397 Iustin Pop
    vgname = self.cfg.GetVGName()
3755 a9e0c397 Iustin Pop
    # start of work
3756 a9e0c397 Iustin Pop
    cfg = self.cfg
3757 a9e0c397 Iustin Pop
    old_node = self.tgt_node
3758 a9e0c397 Iustin Pop
    new_node = self.new_node
3759 a9e0c397 Iustin Pop
    pri_node = instance.primary_node
3760 0834c866 Iustin Pop
3761 0834c866 Iustin Pop
    # Step: check device activation
3762 5bfac263 Iustin Pop
    self.proc.LogStep(1, steps_total, "check device existence")
3763 0834c866 Iustin Pop
    info("checking volume groups")
3764 0834c866 Iustin Pop
    my_vg = cfg.GetVGName()
3765 0834c866 Iustin Pop
    results = rpc.call_vg_list([pri_node, new_node])
3766 0834c866 Iustin Pop
    if not results:
3767 0834c866 Iustin Pop
      raise errors.OpExecError("Can't list volume groups on the nodes")
3768 0834c866 Iustin Pop
    for node in pri_node, new_node:
3769 0834c866 Iustin Pop
      res = results.get(node, False)
3770 0834c866 Iustin Pop
      if not res or my_vg not in res:
3771 0834c866 Iustin Pop
        raise errors.OpExecError("Volume group '%s' not found on %s" %
3772 0834c866 Iustin Pop
                                 (my_vg, node))
3773 0834c866 Iustin Pop
    for dev in instance.disks:
3774 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3775 0834c866 Iustin Pop
        continue
3776 0834c866 Iustin Pop
      info("checking %s on %s" % (dev.iv_name, pri_node))
3777 0834c866 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3778 0834c866 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3779 0834c866 Iustin Pop
        raise errors.OpExecError("Can't find device %s on node %s" %
3780 0834c866 Iustin Pop
                                 (dev.iv_name, pri_node))
3781 0834c866 Iustin Pop
3782 0834c866 Iustin Pop
    # Step: check other node consistency
3783 5bfac263 Iustin Pop
    self.proc.LogStep(2, steps_total, "check peer consistency")
3784 0834c866 Iustin Pop
    for dev in instance.disks:
3785 0834c866 Iustin Pop
      if not dev.iv_name in self.op.disks:
3786 0834c866 Iustin Pop
        continue
3787 0834c866 Iustin Pop
      info("checking %s consistency on %s" % (dev.iv_name, pri_node))
3788 0834c866 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, pri_node, True, ldisk=True):
3789 0834c866 Iustin Pop
        raise errors.OpExecError("Primary node (%s) has degraded storage,"
3790 0834c866 Iustin Pop
                                 " unsafe to replace the secondary" %
3791 0834c866 Iustin Pop
                                 pri_node)
3792 0834c866 Iustin Pop
3793 0834c866 Iustin Pop
    # Step: create new storage
3794 5bfac263 Iustin Pop
    self.proc.LogStep(3, steps_total, "allocate new storage")
3795 a9e0c397 Iustin Pop
    for dev in instance.disks:
3796 a9e0c397 Iustin Pop
      size = dev.size
3797 0834c866 Iustin Pop
      info("adding new local storage on %s for %s" % (new_node, dev.iv_name))
3798 a9e0c397 Iustin Pop
      # since we *always* want to create this LV, we use the
3799 a9e0c397 Iustin Pop
      # _Create...OnPrimary (which forces the creation), even if we
3800 a9e0c397 Iustin Pop
      # are talking about the secondary node
3801 a9e0c397 Iustin Pop
      for new_lv in dev.children:
3802 3f78eef2 Iustin Pop
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
3803 a9e0c397 Iustin Pop
                                        _GetInstanceInfoText(instance)):
3804 a9e0c397 Iustin Pop
          raise errors.OpExecError("Failed to create new LV named '%s' on"
3805 a9e0c397 Iustin Pop
                                   " node '%s'" %
3806 a9e0c397 Iustin Pop
                                   (new_lv.logical_id[1], new_node))
3807 a9e0c397 Iustin Pop
3808 0834c866 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children)
3809 0834c866 Iustin Pop
3810 5bfac263 Iustin Pop
    self.proc.LogStep(4, steps_total, "changing drbd configuration")
3811 0834c866 Iustin Pop
    for dev in instance.disks:
3812 0834c866 Iustin Pop
      size = dev.size
3813 0834c866 Iustin Pop
      info("activating a new drbd on %s for %s" % (new_node, dev.iv_name))
3814 a9e0c397 Iustin Pop
      # create new devices on new_node
3815 a9e0c397 Iustin Pop
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
3816 a9e0c397 Iustin Pop
                              logical_id=(pri_node, new_node,
3817 a9e0c397 Iustin Pop
                                          dev.logical_id[2]),
3818 a9e0c397 Iustin Pop
                              children=dev.children)
3819 3f78eef2 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
3820 3f78eef2 Iustin Pop
                                        new_drbd, False,
3821 a9e0c397 Iustin Pop
                                      _GetInstanceInfoText(instance)):
3822 a9e0c397 Iustin Pop
        raise errors.OpExecError("Failed to create new DRBD on"
3823 a9e0c397 Iustin Pop
                                 " node '%s'" % new_node)
3824 a9e0c397 Iustin Pop
3825 0834c866 Iustin Pop
    for dev in instance.disks:
3826 a9e0c397 Iustin Pop
      # we have new devices, shutdown the drbd on the old secondary
3827 0834c866 Iustin Pop
      info("shutting down drbd for %s on old node" % dev.iv_name)
3828 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, old_node)
3829 a9e0c397 Iustin Pop
      if not rpc.call_blockdev_shutdown(old_node, dev):
3830 0834c866 Iustin Pop
        warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
3831 79caa9ed Guido Trotter
                hint="Please cleanup this device manually as soon as possible")
3832 a9e0c397 Iustin Pop
3833 642445d9 Iustin Pop
    info("detaching primary drbds from the network (=> standalone)")
3834 642445d9 Iustin Pop
    done = 0
3835 642445d9 Iustin Pop
    for dev in instance.disks:
3836 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3837 642445d9 Iustin Pop
      # set the physical (unique in bdev terms) id to None, meaning
3838 642445d9 Iustin Pop
      # detach from network
3839 642445d9 Iustin Pop
      dev.physical_id = (None,) * len(dev.physical_id)
3840 642445d9 Iustin Pop
      # and 'find' the device, which will 'fix' it to match the
3841 642445d9 Iustin Pop
      # standalone state
3842 642445d9 Iustin Pop
      if rpc.call_blockdev_find(pri_node, dev):
3843 642445d9 Iustin Pop
        done += 1
3844 642445d9 Iustin Pop
      else:
3845 642445d9 Iustin Pop
        warning("Failed to detach drbd %s from network, unusual case" %
3846 642445d9 Iustin Pop
                dev.iv_name)
3847 642445d9 Iustin Pop
3848 642445d9 Iustin Pop
    if not done:
3849 642445d9 Iustin Pop
      # no detaches succeeded (very unlikely)
3850 642445d9 Iustin Pop
      raise errors.OpExecError("Can't detach at least one DRBD from old node")
3851 642445d9 Iustin Pop
3852 642445d9 Iustin Pop
    # if we managed to detach at least one, we update all the disks of
3853 642445d9 Iustin Pop
    # the instance to point to the new secondary
3854 642445d9 Iustin Pop
    info("updating instance configuration")
3855 642445d9 Iustin Pop
    for dev in instance.disks:
3856 642445d9 Iustin Pop
      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
3857 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3858 642445d9 Iustin Pop
    cfg.Update(instance)
3859 a9e0c397 Iustin Pop
3860 642445d9 Iustin Pop
    # and now perform the drbd attach
3861 642445d9 Iustin Pop
    info("attaching primary drbds to new secondary (standalone => connected)")
3862 642445d9 Iustin Pop
    failures = []
3863 642445d9 Iustin Pop
    for dev in instance.disks:
3864 642445d9 Iustin Pop
      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
3865 642445d9 Iustin Pop
      # since the attach is smart, it's enough to 'find' the device,
3866 642445d9 Iustin Pop
      # it will automatically activate the network, if the physical_id
3867 642445d9 Iustin Pop
      # is correct
3868 642445d9 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3869 642445d9 Iustin Pop
      if not rpc.call_blockdev_find(pri_node, dev):
3870 642445d9 Iustin Pop
        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
3871 642445d9 Iustin Pop
                "please do a gnt-instance info to see the status of disks")
3872 a9e0c397 Iustin Pop
3873 a9e0c397 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3874 a9e0c397 Iustin Pop
    # does a combined result over all disks, so we don't check its
3875 a9e0c397 Iustin Pop
    # return value
3876 5bfac263 Iustin Pop
    self.proc.LogStep(5, steps_total, "sync devices")
3877 5bfac263 Iustin Pop
    _WaitForSync(cfg, instance, self.proc, unlock=True)
3878 a9e0c397 Iustin Pop
3879 a9e0c397 Iustin Pop
    # so check manually all the devices
3880 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3881 a9e0c397 Iustin Pop
      cfg.SetDiskID(dev, pri_node)
3882 a9e0c397 Iustin Pop
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
3883 a9e0c397 Iustin Pop
      if is_degr:
3884 a9e0c397 Iustin Pop
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
3885 a9e0c397 Iustin Pop
3886 5bfac263 Iustin Pop
    self.proc.LogStep(6, steps_total, "removing old storage")
3887 a9e0c397 Iustin Pop
    for name, (dev, old_lvs) in iv_names.iteritems():
3888 0834c866 Iustin Pop
      info("remove logical volumes for %s" % name)
3889 a9e0c397 Iustin Pop
      for lv in old_lvs:
3890 a9e0c397 Iustin Pop
        cfg.SetDiskID(lv, old_node)
3891 a9e0c397 Iustin Pop
        if not rpc.call_blockdev_remove(old_node, lv):
3892 0834c866 Iustin Pop
          warning("Can't remove LV on old secondary",
3893 79caa9ed Guido Trotter
                  hint="Cleanup stale volumes by hand")
3894 a9e0c397 Iustin Pop
3895 a9e0c397 Iustin Pop
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler,
    based on the instance's disk template (and, for DRBD8, on whether a
    new secondary node was requested).

    """
    instance = self.instance
    template = instance.disk_template
    if template == constants.DT_REMOTE_RAID1:
      handler = self._ExecRR1
    elif template == constants.DT_DRBD8:
      # no remote node means an in-place disk replacement; otherwise we
      # move the secondary to the given node
      if self.op.remote_node is None:
        handler = self._ExecD8DiskOnly
      else:
        handler = self._ExecD8Secondary
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return handler(feedback_fn)
3912 a9e0c397 Iustin Pop
3913 a8083063 Iustin Pop
3914 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        # FIX: this append must be inside the loop; it used to be at the
        # loop's own indentation level, so only the *last* requested
        # instance was ever collected
        self.wanted_instances.append(instance)
    else:
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device and, recursively, its children.
    The "pstatus"/"sstatus" entries hold the raw blockdev_find results
    from the primary and secondary node (sstatus is None when there is
    no secondary for this device).

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # for DRBD devices the secondary is whichever end of the
      # logical_id pair is not the primary node (ignore the passed-in
      # snode in that case)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data.

    Returns a dict mapping instance name to a dict of config state,
    runtime state and per-disk status for every wanted instance.

    """
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      # the hypervisor reports a "state" key only for running instances
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "network_port": instance.network_port,
        "vcpus": instance.vcpus,
        }

      result[instance.name] = idict

    return result
4014 a8083063 Iustin Pop
4015 a8083063 Iustin Pop
4016 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
4017 a8083063 Iustin Pop
  """Modifies an instances's parameters.
4018 a8083063 Iustin Pop

4019 a8083063 Iustin Pop
  """
4020 a8083063 Iustin Pop
  HPATH = "instance-modify"
4021 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
4022 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
4023 a8083063 Iustin Pop
4024 a8083063 Iustin Pop
  def BuildHooksEnv(self):
4025 a8083063 Iustin Pop
    """Build hooks env.
4026 a8083063 Iustin Pop

4027 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
4028 a8083063 Iustin Pop

4029 a8083063 Iustin Pop
    """
4030 396e1b78 Michael Hanselmann
    args = dict()
4031 a8083063 Iustin Pop
    if self.mem:
4032 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
4033 a8083063 Iustin Pop
    if self.vcpus:
4034 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
4035 396e1b78 Michael Hanselmann
    if self.do_ip or self.do_bridge:
4036 396e1b78 Michael Hanselmann
      if self.do_ip:
4037 396e1b78 Michael Hanselmann
        ip = self.ip
4038 396e1b78 Michael Hanselmann
      else:
4039 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
4040 396e1b78 Michael Hanselmann
      if self.bridge:
4041 396e1b78 Michael Hanselmann
        bridge = self.bridge
4042 396e1b78 Michael Hanselmann
      else:
4043 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
4044 396e1b78 Michael Hanselmann
      args['nics'] = [(ip, bridge)]
4045 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
4046 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
4047 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
4048 a8083063 Iustin Pop
    return env, nl, nl
4049 a8083063 Iustin Pop
4050 a8083063 Iustin Pop
  def CheckPrereq(self):
4051 a8083063 Iustin Pop
    """Check prerequisites.
4052 a8083063 Iustin Pop

4053 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
4054 a8083063 Iustin Pop

4055 a8083063 Iustin Pop
    """
4056 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
4057 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
4058 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
4059 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
4060 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
4061 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
4062 a8083063 Iustin Pop
    if self.mem is not None:
4063 a8083063 Iustin Pop
      try:
4064 a8083063 Iustin Pop
        self.mem = int(self.mem)
4065 a8083063 Iustin Pop
      except ValueError, err:
4066 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
4067 a8083063 Iustin Pop
    if self.vcpus is not None:
4068 a8083063 Iustin Pop
      try:
4069 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
4070 a8083063 Iustin Pop
      except ValueError, err:
4071 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
4072 a8083063 Iustin Pop
    if self.ip is not None:
4073 a8083063 Iustin Pop
      self.do_ip = True
4074 a8083063 Iustin Pop
      if self.ip.lower() == "none":
4075 a8083063 Iustin Pop
        self.ip = None
4076 a8083063 Iustin Pop
      else:
4077 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
4078 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
4079 a8083063 Iustin Pop
    else:
4080 a8083063 Iustin Pop
      self.do_ip = False
4081 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
4082 a8083063 Iustin Pop
4083 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
4084 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
4085 a8083063 Iustin Pop
    if instance is None:
4086 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
4087 3ecf6786 Iustin Pop
                                 self.op.instance_name)
4088 a8083063 Iustin Pop
    self.op.instance_name = instance.name
4089 a8083063 Iustin Pop
    self.instance = instance
4090 a8083063 Iustin Pop
    return
4091 a8083063 Iustin Pop
4092 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
4093 a8083063 Iustin Pop
    """Modifies an instance.
4094 a8083063 Iustin Pop

4095 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
4096 a8083063 Iustin Pop
    """
4097 a8083063 Iustin Pop
    result = []
4098 a8083063 Iustin Pop
    instance = self.instance
4099 a8083063 Iustin Pop
    if self.mem:
4100 a8083063 Iustin Pop
      instance.memory = self.mem
4101 a8083063 Iustin Pop
      result.append(("mem", self.mem))
4102 a8083063 Iustin Pop
    if self.vcpus:
4103 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
4104 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
4105 a8083063 Iustin Pop
    if self.do_ip:
4106 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
4107 a8083063 Iustin Pop
      result.append(("ip", self.ip))
4108 a8083063 Iustin Pop
    if self.bridge:
4109 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
4110 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
4111 a8083063 Iustin Pop
4112 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
4113 a8083063 Iustin Pop
4114 a8083063 Iustin Pop
    return result
4115 a8083063 Iustin Pop
4116 a8083063 Iustin Pop
4117 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # an absent or empty node list means "all nodes"
    wanted = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, wanted)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    exports = rpc.call_export_list(self.nodes)
    return exports
4139 a8083063 Iustin Pop
4140 a8083063 Iustin Pop
4141 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    # resolve the (possibly abbreviated) instance name to a full one
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # canonicalize the target node name for later use (hooks, Exec)
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Optionally shuts the instance down around the snapshot (restarting
    it in the finally clause), snapshots the disks, copies the
    snapshots to the target node, finalizes the export there and
    finally removes any older export of this instance from other nodes.
    Per-step failures are logged, not raised.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.proc.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    # LVM snapshots of the instance's disks, to be exported below
    snap_disks = []

    try:
      for disk in instance.disks:
        # NOTE(review): only the "sda" disk is snapshotted/exported here;
        # other disks appear to be deliberately skipped — confirm
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, so a failed
      # export does not leave it shut down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.proc.ChainOpCode(op)

    # TODO: check for size

    # copy each snapshot to the target node, then drop the local snapshot
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.proc.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4254 5c947f38 Iustin Pop
4255 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.op.name (for node/instance kinds) and sets
    self.target to the object whose tags will be operated on.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
      return
    if kind == constants.TAG_NODE:
      name = self.cfg.ExpandNodeName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.target = self.cfg.GetNodeInfo(name)
      return
    if kind == constants.TAG_INSTANCE:
      name = self.cfg.ExpandInstanceName(self.op.name)
      if name is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = name
      self.target = self.cfg.GetInstanceInfo(name)
      return
    raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                               str(self.op.kind))
4284 5c947f38 Iustin Pop
4285 5c947f38 Iustin Pop
4286 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Returns the tag list.

    The target object was resolved by TagsLU.CheckPrereq.

    """
    tags = self.target.GetTags()
    return tags
4297 5c947f38 Iustin Pop
4298 5c947f38 Iustin Pop
4299 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4300 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4301 73415719 Iustin Pop

4302 73415719 Iustin Pop
  """
4303 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4304 73415719 Iustin Pop
4305 73415719 Iustin Pop
  def CheckPrereq(self):
4306 73415719 Iustin Pop
    """Check prerequisites.
4307 73415719 Iustin Pop

4308 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4309 73415719 Iustin Pop

4310 73415719 Iustin Pop
    """
4311 73415719 Iustin Pop
    try:
4312 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4313 73415719 Iustin Pop
    except re.error, err:
4314 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4315 73415719 Iustin Pop
                                 (self.op.pattern, err))
4316 73415719 Iustin Pop
4317 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4318 73415719 Iustin Pop
    """Returns the tag list.
4319 73415719 Iustin Pop

4320 73415719 Iustin Pop
    """
4321 73415719 Iustin Pop
    cfg = self.cfg
4322 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4323 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4324 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4325 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4326 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4327 73415719 Iustin Pop
    results = []
4328 73415719 Iustin Pop
    for path, target in tgts:
4329 73415719 Iustin Pop
      for tag in target.GetTags():
4330 73415719 Iustin Pop
        if self.re.search(tag):
4331 73415719 Iustin Pop
          results.append((path, tag))
4332 73415719 Iustin Pop
    return results
4333 73415719 Iustin Pop
4334 73415719 Iustin Pop
4335 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4336 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4337 5c947f38 Iustin Pop

4338 5c947f38 Iustin Pop
  """
4339 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4340 5c947f38 Iustin Pop
4341 5c947f38 Iustin Pop
  def CheckPrereq(self):
4342 5c947f38 Iustin Pop
    """Check prerequisites.
4343 5c947f38 Iustin Pop

4344 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4345 5c947f38 Iustin Pop

4346 5c947f38 Iustin Pop
    """
4347 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4348 f27302fa Iustin Pop
    for tag in self.op.tags:
4349 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4350 5c947f38 Iustin Pop
4351 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4352 5c947f38 Iustin Pop
    """Sets the tag.
4353 5c947f38 Iustin Pop

4354 5c947f38 Iustin Pop
    """
4355 5c947f38 Iustin Pop
    try:
4356 f27302fa Iustin Pop
      for tag in self.op.tags:
4357 f27302fa Iustin Pop
        self.target.AddTag(tag)
4358 5c947f38 Iustin Pop
    except errors.TagError, err:
4359 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4360 5c947f38 Iustin Pop
    try:
4361 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4362 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4363 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4364 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4365 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4366 5c947f38 Iustin Pop
4367 5c947f38 Iustin Pop
4368 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    # every tag to delete must currently be present on the target
    missing = del_tags - cur_tags
    if missing:
      diff_names = sorted(["'%s'" % tag for tag in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    Removes every requested tag, then persists the change in the
    configuration.

    """
    remove_fn = self.target.RemoveTag
    for tag in self.op.tags:
      remove_fn(tag)
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")