
root / lib / cmdlib.py @ bf6929a2


#
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import socket
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf

class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
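    # Every attribute named in _OP_REQP must be present on the opcode and
    # not None; anything else is reported as a missing parameter.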
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not be prefixed with 'GANETI_', as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in them,
    as it will be added by the hooks runner in case this LU requires a
    cluster to run on (otherwise we don't have a node list). If there
    are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    return {}, [], []


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))


def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

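  # Expose one INSTANCE_NIC<idx>_IP / INSTANCE_NIC<idx>_BRIDGE pair per NIC;
  # INSTANCE_NIC_COUNT is always set, even when the instance has no NICs.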
  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

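  # Walk /etc/hosts line by line: lines that already carry the IP, FQDN and
  # short name are kept as-is, lines that match only partially are dropped
  # (to be rewritten), and unrelated lines are preserved untouched.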
  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

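  # If a stale entry was dropped, rewrite the whole file through a temporary
  # file in /etc and rename it over /etc/hosts; otherwise a plain append of
  # the new line is enough.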
  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()


def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

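  # Scan known_hosts: keep entries that already list this host/IP with the
  # current cluster key, drop entries that match the host but carry a stale
  # or incomplete key, and leave unrelated entries alone.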
  while True:
    rawline = f.readline()
    logger.Debug('read %s' % (repr(rawline),))

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    parts = line.split(' ')
    fields = parts[0].split(',')
    key = parts[2]

    haveall = True
    havesome = False
    for spec in [ ip, fullnode ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
    if haveall and key == pubkey:
      inthere = True
      save_lines.append(rawline)
      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
      continue

    if havesome and (not haveall or key != pubkey):
      removed = True
      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()


def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None


def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

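  # Generate a new DSA key pair non-interactively (-q) with an empty
  # passphrase; the old keys were backed up and removed above.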
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

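  # Create a self-signed certificate with a fresh 1024-bit RSA key, valid for
  # five years; key and certificate both end up in SSL_CERT_FILE.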
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  """
  # check bridges existence
  brlist = [nic.bridge for nic in instance.nics]
  if not rpc.call_bridges_exist(instance.primary_node, brlist):
    raise errors.OpPrereqError("one or more target bridges %s do not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))


class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

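    # Reaching the resolved address on the node daemon port from the local
    # machine is used as the check that this IP really belongs to this host.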
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                 "but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname.name, hostname.ip)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    utils.CreateBackup(priv_key)
    utils.CreateBackup(pub_key)
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())


class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

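    # Apart from its current primary node, no node may report this instance
    # as running.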
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.sstore.GetMasterNode()
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

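    # All node data was gathered above via bulk RPC calls; each node is now
    # checked individually, and failures only mark the verification as bad
    # instead of aborting it.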
    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)


class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

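    # The new name/IP is written to the simple store and pushed to all other
    # nodes; the finally clause makes sure the master role is restarted even
    # if the update or the distribution fails.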
    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,\n"
                     "please restart manually.")


def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
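  # Poll the primary node for mirror status, giving up after 10 consecutive
  # RPC failures; between polls sleep for the reported estimate, capped at
  # 60 seconds.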
1035 a8083063 Iustin Pop
  while True:
1036 a8083063 Iustin Pop
    max_time = 0
1037 a8083063 Iustin Pop
    done = True
1038 a8083063 Iustin Pop
    cumul_degraded = False
1039 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
1040 a8083063 Iustin Pop
    if not rstats:
1041 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
1042 a8083063 Iustin Pop
      retries += 1
1043 a8083063 Iustin Pop
      if retries >= 10:
1044 3ecf6786 Iustin Pop
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1045 3ecf6786 Iustin Pop
                                 " aborting." % node)
1046 a8083063 Iustin Pop
      time.sleep(6)
1047 a8083063 Iustin Pop
      continue
1048 a8083063 Iustin Pop
    retries = 0
1049 a8083063 Iustin Pop
    for i in range(len(rstats)):
1050 a8083063 Iustin Pop
      mstat = rstats[i]
1051 a8083063 Iustin Pop
      if mstat is None:
1052 a8083063 Iustin Pop
        logger.ToStderr("Can't compute data for node %s/%s" %
1053 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
1054 a8083063 Iustin Pop
        continue
1055 a8083063 Iustin Pop
      perc_done, est_time, is_degraded = mstat
1056 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
1057 a8083063 Iustin Pop
      if perc_done is not None:
1058 a8083063 Iustin Pop
        done = False
1059 a8083063 Iustin Pop
        if est_time is not None:
1060 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
1061 a8083063 Iustin Pop
          max_time = est_time
1062 a8083063 Iustin Pop
        else:
1063 a8083063 Iustin Pop
          rem_time = "no time estimate"
1064 a8083063 Iustin Pop
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
1065 a8083063 Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
1066 a8083063 Iustin Pop
    if done or oneshot:
1067 a8083063 Iustin Pop
      break
1068 a8083063 Iustin Pop
1069 a8083063 Iustin Pop
    if unlock:
1070 a8083063 Iustin Pop
      utils.Unlock('cmd')
1071 a8083063 Iustin Pop
    try:
1072 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
1073 a8083063 Iustin Pop
    finally:
1074 a8083063 Iustin Pop
      if unlock:
1075 a8083063 Iustin Pop
        utils.Lock('cmd')
1076 a8083063 Iustin Pop
1077 a8083063 Iustin Pop
  if done:
1078 a8083063 Iustin Pop
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
1079 a8083063 Iustin Pop
  return not cumul_degraded
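
# Illustrative sketch, not part of the original module: the loop above treats
# each entry returned by rpc.call_blockdev_getmirrorstatus as a
# (perc_done, est_time, is_degraded) tuple, where perc_done is None once a
# device has finished syncing.  The helper below (a hypothetical name, shown
# only to spell out that interpretation) summarises such a list:
def _ExampleMirrorStatusSummary(rstats):
  """Return (all_done, any_degraded) for a list of mirror status tuples."""
  all_done = True
  any_degraded = False
  for mstat in rstats:
    if mstat is None:
      # no data could be computed for this device
      continue
    perc_done, _, is_degraded = mstat
    # a device counts as degraded only if it claims to be done but degraded
    any_degraded = any_degraded or (is_degraded and perc_done is None)
    if perc_done is not None:
      all_done = False
  return all_done, any_degraded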
1080 a8083063 Iustin Pop
1081 a8083063 Iustin Pop
1082 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
1083 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1084 a8083063 Iustin Pop

1085 a8083063 Iustin Pop
  """
1086 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1087 a8083063 Iustin Pop
1088 a8083063 Iustin Pop
  result = True
1089 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1090 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1091 a8083063 Iustin Pop
    if not rstats:
1092 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
1093 a8083063 Iustin Pop
      result = False
1094 a8083063 Iustin Pop
    else:
1095 a8083063 Iustin Pop
      result = result and (not rstats[5])
1096 a8083063 Iustin Pop
  if dev.children:
1097 a8083063 Iustin Pop
    for child in dev.children:
1098 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1099 a8083063 Iustin Pop
1100 a8083063 Iustin Pop
  return result
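
# Illustrative sketch, not part of the original module: a typical caller walks
# all disks of an instance and folds the per-device results together; the
# helper name below is hypothetical:
def _ExampleAllDisksConsistent(cfgw, instance, on_primary=True):
  """Return True only if every disk of the instance reports as consistent."""
  result = True
  for dev in instance.disks:
    result = result and _CheckDiskConsistency(cfgw, dev,
                                              instance.primary_node,
                                              on_primary)
  return result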
1101 a8083063 Iustin Pop
1102 a8083063 Iustin Pop
1103 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1104 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1105 a8083063 Iustin Pop

1106 a8083063 Iustin Pop
  """
1107 a8083063 Iustin Pop
  _OP_REQP = []
1108 a8083063 Iustin Pop
1109 a8083063 Iustin Pop
  def CheckPrereq(self):
1110 a8083063 Iustin Pop
    """Check prerequisites.
1111 a8083063 Iustin Pop

1112 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1113 a8083063 Iustin Pop

1114 a8083063 Iustin Pop
    """
1115 a8083063 Iustin Pop
    return
1116 a8083063 Iustin Pop
1117 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1118 a8083063 Iustin Pop
    """Compute the list of OSes.
1119 a8083063 Iustin Pop

1120 a8083063 Iustin Pop
    """
1121 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1122 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1123 a8083063 Iustin Pop
    if node_data == False:
1124 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1125 a8083063 Iustin Pop
    return node_data
1126 a8083063 Iustin Pop
1127 a8083063 Iustin Pop
1128 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1129 a8083063 Iustin Pop
  """Logical unit for removing a node.
1130 a8083063 Iustin Pop

1131 a8083063 Iustin Pop
  """
1132 a8083063 Iustin Pop
  HPATH = "node-remove"
1133 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1134 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1135 a8083063 Iustin Pop
1136 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1137 a8083063 Iustin Pop
    """Build hooks env.
1138 a8083063 Iustin Pop

1139 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1140 a8083063 Iustin Pop
    node would not allow itself to run.
1141 a8083063 Iustin Pop

1142 a8083063 Iustin Pop
    """
1143 396e1b78 Michael Hanselmann
    env = {
1144 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1145 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1146 396e1b78 Michael Hanselmann
      }
1147 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1148 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1149 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1150 a8083063 Iustin Pop
1151 a8083063 Iustin Pop
  def CheckPrereq(self):
1152 a8083063 Iustin Pop
    """Check prerequisites.
1153 a8083063 Iustin Pop

1154 a8083063 Iustin Pop
    This checks:
1155 a8083063 Iustin Pop
     - the node exists in the configuration
1156 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1157 a8083063 Iustin Pop
     - it's not the master
1158 a8083063 Iustin Pop

1159 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1160 a8083063 Iustin Pop

1161 a8083063 Iustin Pop
    """
1162 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1163 a8083063 Iustin Pop
    if node is None:
1164 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1165 a8083063 Iustin Pop
1166 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1167 a8083063 Iustin Pop
1168 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1169 a8083063 Iustin Pop
    if node.name == masternode:
1170 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1171 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1172 a8083063 Iustin Pop
1173 a8083063 Iustin Pop
    for instance_name in instance_list:
1174 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1175 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1176 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1177 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1178 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1179 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1180 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1181 a8083063 Iustin Pop
    self.op.node_name = node.name
1182 a8083063 Iustin Pop
    self.node = node
1183 a8083063 Iustin Pop
1184 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1185 a8083063 Iustin Pop
    """Removes the node from the cluster.
1186 a8083063 Iustin Pop

1187 a8083063 Iustin Pop
    """
1188 a8083063 Iustin Pop
    node = self.node
1189 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1190 a8083063 Iustin Pop
                node.name)
1191 a8083063 Iustin Pop
1192 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1193 a8083063 Iustin Pop
1194 a8083063 Iustin Pop
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1195 a8083063 Iustin Pop
1196 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1197 a8083063 Iustin Pop
1198 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1199 a8083063 Iustin Pop
1200 a8083063 Iustin Pop
1201 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1202 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1203 a8083063 Iustin Pop

1204 a8083063 Iustin Pop
  """
1205 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1206 a8083063 Iustin Pop
1207 a8083063 Iustin Pop
  def CheckPrereq(self):
1208 a8083063 Iustin Pop
    """Check prerequisites.
1209 a8083063 Iustin Pop

1210 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1211 a8083063 Iustin Pop

1212 a8083063 Iustin Pop
    """
1213 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["dtotal", "dfree",
1214 3ef10550 Michael Hanselmann
                                     "mtotal", "mnode", "mfree",
1215 3ef10550 Michael Hanselmann
                                     "bootid"])
1216 a8083063 Iustin Pop
1217 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1218 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1219 ec223efb Iustin Pop
                               "pip", "sip"],
1220 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1221 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1222 a8083063 Iustin Pop
1223 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1224 a8083063 Iustin Pop
1225 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1226 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1227 a8083063 Iustin Pop

1228 a8083063 Iustin Pop
    """
1229 246e180a Iustin Pop
    nodenames = self.wanted
1230 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1231 a8083063 Iustin Pop
1232 a8083063 Iustin Pop
    # begin data gathering
1233 a8083063 Iustin Pop
1234 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1235 a8083063 Iustin Pop
      live_data = {}
1236 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1237 a8083063 Iustin Pop
      for name in nodenames:
1238 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1239 a8083063 Iustin Pop
        if nodeinfo:
1240 a8083063 Iustin Pop
          live_data[name] = {
1241 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1242 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1243 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1244 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1245 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1246 3ef10550 Michael Hanselmann
            "bootid": nodeinfo['bootid'],
1247 a8083063 Iustin Pop
            }
1248 a8083063 Iustin Pop
        else:
1249 a8083063 Iustin Pop
          live_data[name] = {}
1250 a8083063 Iustin Pop
    else:
1251 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1252 a8083063 Iustin Pop
1253 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1254 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1255 a8083063 Iustin Pop
1256 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1257 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1258 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1259 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1260 a8083063 Iustin Pop
1261 ec223efb Iustin Pop
      for instance_name in instancelist:
1262 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1263 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1264 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1265 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1266 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1267 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1268 a8083063 Iustin Pop
1269 a8083063 Iustin Pop
    # end data gathering
1270 a8083063 Iustin Pop
1271 a8083063 Iustin Pop
    output = []
1272 a8083063 Iustin Pop
    for node in nodelist:
1273 a8083063 Iustin Pop
      node_output = []
1274 a8083063 Iustin Pop
      for field in self.op.output_fields:
1275 a8083063 Iustin Pop
        if field == "name":
1276 a8083063 Iustin Pop
          val = node.name
1277 ec223efb Iustin Pop
        elif field == "pinst_list":
1278 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1279 ec223efb Iustin Pop
        elif field == "sinst_list":
1280 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1281 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1282 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1283 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1284 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1285 a8083063 Iustin Pop
        elif field == "pip":
1286 a8083063 Iustin Pop
          val = node.primary_ip
1287 a8083063 Iustin Pop
        elif field == "sip":
1288 a8083063 Iustin Pop
          val = node.secondary_ip
1289 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1290 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1291 a8083063 Iustin Pop
        else:
1292 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1293 a8083063 Iustin Pop
        node_output.append(val)
1294 a8083063 Iustin Pop
      output.append(node_output)
1295 a8083063 Iustin Pop
1296 a8083063 Iustin Pop
    return output
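
# Illustrative sketch, not part of the original module: for an opcode with
# output_fields = ["name", "pinst_cnt", "sinst_cnt", "mfree"], Exec() returns
# one row per node with the values in the order the fields were requested,
# e.g. (node names and numbers below are made up):
#
#   [["node1.example.com", 2, 1, 3480],
#    ["node2.example.com", 0, 3, 7936]]
#
# Dynamic fields such as "mfree" come from rpc.call_node_info and are None for
# nodes that could not be contacted.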
1297 a8083063 Iustin Pop
1298 a8083063 Iustin Pop
1299 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1300 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1301 dcb93971 Michael Hanselmann

1302 dcb93971 Michael Hanselmann
  """
1303 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1304 dcb93971 Michael Hanselmann
1305 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1306 dcb93971 Michael Hanselmann
    """Check prerequisites.
1307 dcb93971 Michael Hanselmann

1308 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1309 dcb93971 Michael Hanselmann

1310 dcb93971 Michael Hanselmann
    """
1311 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1312 dcb93971 Michael Hanselmann
1313 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1314 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1315 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1316 dcb93971 Michael Hanselmann
1317 dcb93971 Michael Hanselmann
1318 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1319 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1320 dcb93971 Michael Hanselmann

1321 dcb93971 Michael Hanselmann
    """
1322 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1323 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1324 dcb93971 Michael Hanselmann
1325 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1326 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1327 dcb93971 Michael Hanselmann
1328 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1329 dcb93971 Michael Hanselmann
1330 dcb93971 Michael Hanselmann
    output = []
1331 dcb93971 Michael Hanselmann
    for node in nodenames:
1332 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1333 37d19eb2 Michael Hanselmann
        continue
1334 37d19eb2 Michael Hanselmann
1335 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1336 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1337 dcb93971 Michael Hanselmann
1338 dcb93971 Michael Hanselmann
      for vol in node_vols:
1339 dcb93971 Michael Hanselmann
        node_output = []
1340 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1341 dcb93971 Michael Hanselmann
          if field == "node":
1342 dcb93971 Michael Hanselmann
            val = node
1343 dcb93971 Michael Hanselmann
          elif field == "phys":
1344 dcb93971 Michael Hanselmann
            val = vol['dev']
1345 dcb93971 Michael Hanselmann
          elif field == "vg":
1346 dcb93971 Michael Hanselmann
            val = vol['vg']
1347 dcb93971 Michael Hanselmann
          elif field == "name":
1348 dcb93971 Michael Hanselmann
            val = vol['name']
1349 dcb93971 Michael Hanselmann
          elif field == "size":
1350 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1351 dcb93971 Michael Hanselmann
          elif field == "instance":
1352 dcb93971 Michael Hanselmann
            for inst in ilist:
1353 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1354 dcb93971 Michael Hanselmann
                continue
1355 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1356 dcb93971 Michael Hanselmann
                val = inst.name
1357 dcb93971 Michael Hanselmann
                break
1358 dcb93971 Michael Hanselmann
            else:
1359 dcb93971 Michael Hanselmann
              val = '-'
1360 dcb93971 Michael Hanselmann
          else:
1361 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1362 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1363 dcb93971 Michael Hanselmann
1364 dcb93971 Michael Hanselmann
        output.append(node_output)
1365 dcb93971 Michael Hanselmann
1366 dcb93971 Michael Hanselmann
    return output
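
# Illustrative sketch, not part of the original module: with
# output_fields = ["node", "phys", "vg", "name", "size", "instance"], every
# logical volume becomes one row of strings, e.g. (values below are made up):
#
#   ["node1.example.com", "/dev/sdb1", "xenvg", "instance1-disk0", "10240",
#    "instance1.example.com"]
#
# and "-" is used in the instance column for volumes that belong to no
# instance.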
1367 dcb93971 Michael Hanselmann
1368 dcb93971 Michael Hanselmann
1369 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1370 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1371 a8083063 Iustin Pop

1372 a8083063 Iustin Pop
  """
1373 a8083063 Iustin Pop
  HPATH = "node-add"
1374 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1375 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1376 a8083063 Iustin Pop
1377 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1378 a8083063 Iustin Pop
    """Build hooks env.
1379 a8083063 Iustin Pop

1380 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1381 a8083063 Iustin Pop

1382 a8083063 Iustin Pop
    """
1383 a8083063 Iustin Pop
    env = {
1384 0e137c28 Iustin Pop
      "OP_TARGET": self.op.node_name,
1385 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1386 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1387 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1388 a8083063 Iustin Pop
      }
1389 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1390 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1391 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1392 a8083063 Iustin Pop
1393 a8083063 Iustin Pop
  def CheckPrereq(self):
1394 a8083063 Iustin Pop
    """Check prerequisites.
1395 a8083063 Iustin Pop

1396 a8083063 Iustin Pop
    This checks:
1397 a8083063 Iustin Pop
     - the new node is not already in the config
1398 a8083063 Iustin Pop
     - it is resolvable
1399 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1400 a8083063 Iustin Pop

1401 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1402 a8083063 Iustin Pop

1403 a8083063 Iustin Pop
    """
1404 a8083063 Iustin Pop
    node_name = self.op.node_name
1405 a8083063 Iustin Pop
    cfg = self.cfg
1406 a8083063 Iustin Pop
1407 89e1fc26 Iustin Pop
    dns_data = utils.HostInfo(node_name)
1408 a8083063 Iustin Pop
1409 bcf043c9 Iustin Pop
    node = dns_data.name
1410 bcf043c9 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data.ip
1411 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1412 a8083063 Iustin Pop
    if secondary_ip is None:
1413 a8083063 Iustin Pop
      secondary_ip = primary_ip
1414 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1415 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1416 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1417 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1418 a8083063 Iustin Pop
    if node in node_list:
1419 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node %s is already in the configuration"
1420 3ecf6786 Iustin Pop
                                 % node)
1421 a8083063 Iustin Pop
1422 a8083063 Iustin Pop
    for existing_node_name in node_list:
1423 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1424 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1425 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1426 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1427 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1428 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1429 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1430 a8083063 Iustin Pop
1431 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1432 a8083063 Iustin Pop
    # same as for the master
1433 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1434 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1435 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1436 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1437 a8083063 Iustin Pop
      if master_singlehomed:
1438 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1439 3ecf6786 Iustin Pop
                                   " new node has one")
1440 a8083063 Iustin Pop
      else:
1441 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1442 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1443 a8083063 Iustin Pop
1444 a8083063 Iustin Pop
    # checks reachability
1445 16abfbc2 Alexander Schreiber
    if not utils.TcpPing(utils.HostInfo().name,
1446 16abfbc2 Alexander Schreiber
                         primary_ip,
1447 16abfbc2 Alexander Schreiber
                         constants.DEFAULT_NODED_PORT):
1448 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1449 a8083063 Iustin Pop
1450 a8083063 Iustin Pop
    if not newbie_singlehomed:
1451 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1452 16abfbc2 Alexander Schreiber
      if not utils.TcpPing(myself.secondary_ip,
1453 16abfbc2 Alexander Schreiber
                           secondary_ip,
1454 16abfbc2 Alexander Schreiber
                           constants.DEFAULT_NODED_PORT):
1455 16abfbc2 Alexander Schreiber
        raise errors.OpPrereqError(
1456 16abfbc2 Alexander Schreiber
          "Node secondary ip not reachable by TCP based ping to noded port")
1457 a8083063 Iustin Pop
1458 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1459 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1460 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1461 a8083063 Iustin Pop
1462 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1463 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1464 a8083063 Iustin Pop

1465 a8083063 Iustin Pop
    """
1466 a8083063 Iustin Pop
    new_node = self.new_node
1467 a8083063 Iustin Pop
    node = new_node.name
1468 a8083063 Iustin Pop
1469 a8083063 Iustin Pop
    # set up inter-node password and certificate and restart the node daemon
1470 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1471 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1472 3ecf6786 Iustin Pop
      raise errors.OpExecError("ganeti password corruption detected")
1473 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1474 a8083063 Iustin Pop
    try:
1475 a8083063 Iustin Pop
      gntpem = f.read(8192)
1476 a8083063 Iustin Pop
    finally:
1477 a8083063 Iustin Pop
      f.close()
1478 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1479 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1480 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1481 a8083063 Iustin Pop
    # parsed by the shell sequence below
1482 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1483 3ecf6786 Iustin Pop
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
1484 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1485 3ecf6786 Iustin Pop
      raise errors.OpExecError("PEM must end with newline")
1486 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1487 a8083063 Iustin Pop
1488 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1489 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1490 a8083063 Iustin Pop
    # either by being constants or by the checks above
1491 a8083063 Iustin Pop
    ss = self.sstore
1492 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1493 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1494 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1495 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1496 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1497 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1498 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
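
    # Illustrative expansion, not part of the original source; placeholders in
    # angle brackets stand for the substituted values and the first shell line
    # is wrapped here for readability:
    #
    #   umask 077 && echo '<password>' > '<SS_NODED_PASS file>' &&
    #     cat > '<SSL_CERT_FILE>' << '!EOF.' &&
    #   <PEM data, ending with a newline>!EOF.
    #   <NODE_INITD_SCRIPT> restart
    #
    # i.e. the certificate travels inside a quoted here-document, which is why
    # '!EOF.' was checked against the PEM contents above.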
1499 a8083063 Iustin Pop
1500 a8083063 Iustin Pop
    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
1501 a8083063 Iustin Pop
    if result.failed:
1502 3ecf6786 Iustin Pop
      raise errors.OpExecError("Remote command on node %s, error: %s,"
1503 3ecf6786 Iustin Pop
                               " output: %s" %
1504 3ecf6786 Iustin Pop
                               (node, result.fail_reason, result.output))
1505 a8083063 Iustin Pop
1506 a8083063 Iustin Pop
    # check connectivity
1507 a8083063 Iustin Pop
    time.sleep(4)
1508 a8083063 Iustin Pop
1509 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1510 a8083063 Iustin Pop
    if result:
1511 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1512 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1513 a8083063 Iustin Pop
                    (node, result))
1514 a8083063 Iustin Pop
      else:
1515 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1516 3ecf6786 Iustin Pop
                                 " node version %s" %
1517 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1518 a8083063 Iustin Pop
    else:
1519 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1520 a8083063 Iustin Pop
1521 a8083063 Iustin Pop
    # setup ssh on node
1522 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1523 70d9e3d8 Iustin Pop
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
1524 a8083063 Iustin Pop
    keyarray = []
1525 70d9e3d8 Iustin Pop
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
1526 70d9e3d8 Iustin Pop
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
1527 70d9e3d8 Iustin Pop
                priv_key, pub_key]
1528 a8083063 Iustin Pop
1529 a8083063 Iustin Pop
    for i in keyfiles:
1530 a8083063 Iustin Pop
      f = open(i, 'r')
1531 a8083063 Iustin Pop
      try:
1532 a8083063 Iustin Pop
        keyarray.append(f.read())
1533 a8083063 Iustin Pop
      finally:
1534 a8083063 Iustin Pop
        f.close()
1535 a8083063 Iustin Pop
1536 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1537 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1538 a8083063 Iustin Pop
1539 a8083063 Iustin Pop
    if not result:
1540 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1541 a8083063 Iustin Pop
1542 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1543 a8083063 Iustin Pop
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
1544 a8083063 Iustin Pop
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
1545 a8083063 Iustin Pop
                      self.cfg.GetHostKey())
1546 a8083063 Iustin Pop
1547 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1548 16abfbc2 Alexander Schreiber
      if not rpc.call_node_tcp_ping(new_node.name,
1549 16abfbc2 Alexander Schreiber
                                    constants.LOCALHOST_IP_ADDRESS,
1550 16abfbc2 Alexander Schreiber
                                    new_node.secondary_ip,
1551 16abfbc2 Alexander Schreiber
                                    constants.DEFAULT_NODED_PORT,
1552 16abfbc2 Alexander Schreiber
                                    10, False):
1553 3ecf6786 Iustin Pop
        raise errors.OpExecError("Node claims it doesn't have the"
1554 3ecf6786 Iustin Pop
                                 " secondary ip you gave (%s).\n"
1555 3ecf6786 Iustin Pop
                                 "Please fix and re-run this command." %
1556 3ecf6786 Iustin Pop
                                 new_node.secondary_ip)
1557 a8083063 Iustin Pop
1558 ff98055b Iustin Pop
    success, msg = ssh.VerifyNodeHostname(node)
1559 ff98055b Iustin Pop
    if not success:
1560 ff98055b Iustin Pop
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
1561 ff98055b Iustin Pop
                               " than the one the resolver gives: %s.\n"
1562 ff98055b Iustin Pop
                               "Please fix and re-run this command." %
1563 ff98055b Iustin Pop
                               (node, msg))
1564 ff98055b Iustin Pop
1565 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1566 a8083063 Iustin Pop
    # including the node just added
1567 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1568 a8083063 Iustin Pop
    dist_nodes = self.cfg.GetNodeList() + [node]
1569 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1570 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1571 a8083063 Iustin Pop
1572 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1573 82122173 Iustin Pop
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
1574 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1575 a8083063 Iustin Pop
      for to_node in dist_nodes:
1576 a8083063 Iustin Pop
        if not result[to_node]:
1577 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1578 a8083063 Iustin Pop
                       (fname, to_node))
1579 a8083063 Iustin Pop
1580 cb91d46e Iustin Pop
    to_copy = ss.GetFileList()
1581 a8083063 Iustin Pop
    for fname in to_copy:
1582 a8083063 Iustin Pop
      if not ssh.CopyFileToNode(node, fname):
1583 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1584 a8083063 Iustin Pop
1585 a8083063 Iustin Pop
    logger.Info("adding node %s to cluster.conf" % node)
1586 a8083063 Iustin Pop
    self.cfg.AddNode(new_node)
1587 a8083063 Iustin Pop
1588 a8083063 Iustin Pop
1589 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1590 a8083063 Iustin Pop
  """Failover the master node to the current node.
1591 a8083063 Iustin Pop

1592 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1593 a8083063 Iustin Pop

1594 a8083063 Iustin Pop
  """
1595 a8083063 Iustin Pop
  HPATH = "master-failover"
1596 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1597 a8083063 Iustin Pop
  REQ_MASTER = False
1598 a8083063 Iustin Pop
  _OP_REQP = []
1599 a8083063 Iustin Pop
1600 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1601 a8083063 Iustin Pop
    """Build hooks env.
1602 a8083063 Iustin Pop

1603 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1604 a8083063 Iustin Pop
    the nodes in the post phase.
1605 a8083063 Iustin Pop

1606 a8083063 Iustin Pop
    """
1607 a8083063 Iustin Pop
    env = {
1608 0e137c28 Iustin Pop
      "OP_TARGET": self.new_master,
1609 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1610 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1611 a8083063 Iustin Pop
      }
1612 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1613 a8083063 Iustin Pop
1614 a8083063 Iustin Pop
  def CheckPrereq(self):
1615 a8083063 Iustin Pop
    """Check prerequisites.
1616 a8083063 Iustin Pop

1617 a8083063 Iustin Pop
    This checks that we are not already the master.
1618 a8083063 Iustin Pop

1619 a8083063 Iustin Pop
    """
1620 89e1fc26 Iustin Pop
    self.new_master = utils.HostInfo().name
1621 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1622 a8083063 Iustin Pop
1623 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1624 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This commands must be run on the node"
1625 3ecf6786 Iustin Pop
                                 " where you want the new master to be.\n"
1626 3ecf6786 Iustin Pop
                                 "%s is already the master" %
1627 3ecf6786 Iustin Pop
                                 self.old_master)
1628 a8083063 Iustin Pop
1629 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1630 a8083063 Iustin Pop
    """Failover the master node.
1631 a8083063 Iustin Pop

1632 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1633 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1634 a8083063 Iustin Pop
    master.
1635 a8083063 Iustin Pop

1636 a8083063 Iustin Pop
    """
1637 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1638 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1639 a8083063 Iustin Pop
                (self.new_master, self.old_master))
1640 a8083063 Iustin Pop
1641 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1642 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1643 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1644 a8083063 Iustin Pop
1645 880478f8 Iustin Pop
    ss = self.sstore
1646 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1647 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1648 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1649 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1650 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1651 880478f8 Iustin Pop
1652 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1653 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1654 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1655 880478f8 Iustin Pop
      feedback_fn("Error in activating the master IP on the new master,\n"
1656 880478f8 Iustin Pop
                  "please fix manually.")
1657 a8083063 Iustin Pop
1658 a8083063 Iustin Pop
1659 a8083063 Iustin Pop
1660 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1661 a8083063 Iustin Pop
  """Query cluster configuration.
1662 a8083063 Iustin Pop

1663 a8083063 Iustin Pop
  """
1664 a8083063 Iustin Pop
  _OP_REQP = []
1665 59322403 Iustin Pop
  REQ_MASTER = False
1666 a8083063 Iustin Pop
1667 a8083063 Iustin Pop
  def CheckPrereq(self):
1668 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1669 a8083063 Iustin Pop

1670 a8083063 Iustin Pop
    """
1671 a8083063 Iustin Pop
    pass
1672 a8083063 Iustin Pop
1673 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1674 a8083063 Iustin Pop
    """Return cluster config.
1675 a8083063 Iustin Pop

1676 a8083063 Iustin Pop
    """
1677 a8083063 Iustin Pop
    result = {
1678 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1679 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1680 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1681 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1682 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1683 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1684 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1685 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1686 a8083063 Iustin Pop
      }
1687 a8083063 Iustin Pop
1688 a8083063 Iustin Pop
    return result
1689 a8083063 Iustin Pop
1690 a8083063 Iustin Pop
1691 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
1692 a8083063 Iustin Pop
  """Copy file to cluster.
1693 a8083063 Iustin Pop

1694 a8083063 Iustin Pop
  """
1695 a8083063 Iustin Pop
  _OP_REQP = ["nodes", "filename"]
1696 a8083063 Iustin Pop
1697 a8083063 Iustin Pop
  def CheckPrereq(self):
1698 a8083063 Iustin Pop
    """Check prerequisites.
1699 a8083063 Iustin Pop

1700 a8083063 Iustin Pop
    It should check that the named file exists and that the given list
1701 a8083063 Iustin Pop
    of nodes is valid.
1702 a8083063 Iustin Pop

1703 a8083063 Iustin Pop
    """
1704 a8083063 Iustin Pop
    if not os.path.exists(self.op.filename):
1705 a8083063 Iustin Pop
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
1706 dcb93971 Michael Hanselmann
1707 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1708 a8083063 Iustin Pop
1709 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1710 a8083063 Iustin Pop
    """Copy a file from master to some nodes.
1711 a8083063 Iustin Pop

1712 a8083063 Iustin Pop
    Args:
1713 a8083063 Iustin Pop
      opts - class with options as members
1714 a8083063 Iustin Pop
      args - list containing a single element, the file name
1715 a8083063 Iustin Pop
    Opts used:
1716 a8083063 Iustin Pop
      nodes - list containing the name of target nodes; if empty, all nodes
1717 a8083063 Iustin Pop

1718 a8083063 Iustin Pop
    """
1719 a8083063 Iustin Pop
    filename = self.op.filename
1720 a8083063 Iustin Pop
1721 89e1fc26 Iustin Pop
    myname = utils.HostInfo().name
1722 a8083063 Iustin Pop
1723 a7ba5e53 Iustin Pop
    for node in self.nodes:
1724 a8083063 Iustin Pop
      if node == myname:
1725 a8083063 Iustin Pop
        continue
1726 a8083063 Iustin Pop
      if not ssh.CopyFileToNode(node, filename):
1727 a8083063 Iustin Pop
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
1728 a8083063 Iustin Pop
1729 a8083063 Iustin Pop
1730 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1731 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1732 a8083063 Iustin Pop

1733 a8083063 Iustin Pop
  """
1734 a8083063 Iustin Pop
  _OP_REQP = []
1735 a8083063 Iustin Pop
1736 a8083063 Iustin Pop
  def CheckPrereq(self):
1737 a8083063 Iustin Pop
    """No prerequisites.
1738 a8083063 Iustin Pop

1739 a8083063 Iustin Pop
    """
1740 a8083063 Iustin Pop
    pass
1741 a8083063 Iustin Pop
1742 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1743 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1744 a8083063 Iustin Pop

1745 a8083063 Iustin Pop
    """
1746 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1747 a8083063 Iustin Pop
1748 a8083063 Iustin Pop
1749 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
1750 a8083063 Iustin Pop
  """Run a command on some nodes.
1751 a8083063 Iustin Pop

1752 a8083063 Iustin Pop
  """
1753 a8083063 Iustin Pop
  _OP_REQP = ["command", "nodes"]
1754 a8083063 Iustin Pop
1755 a8083063 Iustin Pop
  def CheckPrereq(self):
1756 a8083063 Iustin Pop
    """Check prerequisites.
1757 a8083063 Iustin Pop

1758 a8083063 Iustin Pop
    It checks that the given list of nodes is valid.
1759 a8083063 Iustin Pop

1760 a8083063 Iustin Pop
    """
1761 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1762 a8083063 Iustin Pop
1763 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1764 a8083063 Iustin Pop
    """Run a command on some nodes.
1765 a8083063 Iustin Pop

1766 a8083063 Iustin Pop
    """
1767 a8083063 Iustin Pop
    data = []
1768 a8083063 Iustin Pop
    for node in self.nodes:
1769 a7ba5e53 Iustin Pop
      result = ssh.SSHCall(node, "root", self.op.command)
1770 a7ba5e53 Iustin Pop
      data.append((node, result.output, result.exit_code))
1771 a8083063 Iustin Pop
1772 a8083063 Iustin Pop
    return data
1773 a8083063 Iustin Pop
1774 a8083063 Iustin Pop
1775 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1776 a8083063 Iustin Pop
  """Bring up an instance's disks.
1777 a8083063 Iustin Pop

1778 a8083063 Iustin Pop
  """
1779 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1780 a8083063 Iustin Pop
1781 a8083063 Iustin Pop
  def CheckPrereq(self):
1782 a8083063 Iustin Pop
    """Check prerequisites.
1783 a8083063 Iustin Pop

1784 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1785 a8083063 Iustin Pop

1786 a8083063 Iustin Pop
    """
1787 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1788 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1789 a8083063 Iustin Pop
    if instance is None:
1790 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1791 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1792 a8083063 Iustin Pop
    self.instance = instance
1793 a8083063 Iustin Pop
1794 a8083063 Iustin Pop
1795 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1796 a8083063 Iustin Pop
    """Activate the disks.
1797 a8083063 Iustin Pop

1798 a8083063 Iustin Pop
    """
1799 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1800 a8083063 Iustin Pop
    if not disks_ok:
1801 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1802 a8083063 Iustin Pop
1803 a8083063 Iustin Pop
    return disks_info
1804 a8083063 Iustin Pop
1805 a8083063 Iustin Pop
1806 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1807 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1808 a8083063 Iustin Pop

1809 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1810 a8083063 Iustin Pop

1811 a8083063 Iustin Pop
  Args:
1812 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1813 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1814 a8083063 Iustin Pop
                        in an error return from the function
1815 a8083063 Iustin Pop

1816 a8083063 Iustin Pop
  Returns:
1817 a8083063 Iustin Pop
    false if the operation failed
1818 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
1819 a8083063 Iustin Pop
         succeeded with the mapping from node devices to instance devices
1820 a8083063 Iustin Pop
  """
1821 a8083063 Iustin Pop
  device_info = []
1822 a8083063 Iustin Pop
  disks_ok = True
1823 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1824 a8083063 Iustin Pop
    master_result = None
1825 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1826 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1827 a8083063 Iustin Pop
      is_primary = node == instance.primary_node
1828 a8083063 Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
1829 a8083063 Iustin Pop
      if not result:
1830 a8083063 Iustin Pop
        logger.Error("could not prepare block device %s on node %s (is_pri"
1831 a8083063 Iustin Pop
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
1832 a8083063 Iustin Pop
        if is_primary or not ignore_secondaries:
1833 a8083063 Iustin Pop
          disks_ok = False
1834 a8083063 Iustin Pop
      if is_primary:
1835 a8083063 Iustin Pop
        master_result = result
1836 a8083063 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
1837 a8083063 Iustin Pop
                        master_result))
1838 a8083063 Iustin Pop
1839 a8083063 Iustin Pop
  return disks_ok, device_info
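
# Illustrative sketch, not part of the original module: callers such as
# LUActivateInstanceDisks above unpack the return value as below (the helper
# name and the logging are hypothetical):
def _ExampleActivateAndReport(instance, cfg):
  """Assemble an instance's disks and log the resulting device mapping."""
  disks_ok, device_info = _AssembleInstanceDisks(instance, cfg)
  if not disks_ok:
    raise errors.OpExecError("Cannot activate block devices")
  for node, iv_name, node_dev in device_info:
    logger.Info("disk %s of %s assembled as %s on node %s" %
                (iv_name, instance.name, node_dev, node))
  return device_info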
1840 a8083063 Iustin Pop
1841 a8083063 Iustin Pop
1842 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1843 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1844 3ecf6786 Iustin Pop

1845 3ecf6786 Iustin Pop
  """
1846 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1847 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1848 fe7b0351 Michael Hanselmann
  if not disks_ok:
1849 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1850 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1851 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1852 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1853 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1854 fe7b0351 Michael Hanselmann
1855 fe7b0351 Michael Hanselmann
1856 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1857 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1858 a8083063 Iustin Pop

1859 a8083063 Iustin Pop
  """
1860 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1861 a8083063 Iustin Pop
1862 a8083063 Iustin Pop
  def CheckPrereq(self):
1863 a8083063 Iustin Pop
    """Check prerequisites.
1864 a8083063 Iustin Pop

1865 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1866 a8083063 Iustin Pop

1867 a8083063 Iustin Pop
    """
1868 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1869 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1870 a8083063 Iustin Pop
    if instance is None:
1871 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1872 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1873 a8083063 Iustin Pop
    self.instance = instance
1874 a8083063 Iustin Pop
1875 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1876 a8083063 Iustin Pop
    """Deactivate the disks
1877 a8083063 Iustin Pop

1878 a8083063 Iustin Pop
    """
1879 a8083063 Iustin Pop
    instance = self.instance
1880 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1881 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1882 a8083063 Iustin Pop
    if type(ins_l) is not list:
1883 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
1884 3ecf6786 Iustin Pop
                               instance.primary_node)
1885 a8083063 Iustin Pop
1886 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1887 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
1888 3ecf6786 Iustin Pop
                               " block devices.")
1889 a8083063 Iustin Pop
1890 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1891 a8083063 Iustin Pop
1892 a8083063 Iustin Pop
1893 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1894 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1895 a8083063 Iustin Pop

1896 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1897 a8083063 Iustin Pop

1898 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
1899 a8083063 Iustin Pop
  ignored.
1900 a8083063 Iustin Pop

1901 a8083063 Iustin Pop
  """
1902 a8083063 Iustin Pop
  result = True
1903 a8083063 Iustin Pop
  for disk in instance.disks:
1904 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1905 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1906 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1907 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1908 a8083063 Iustin Pop
                     (disk.iv_name, node))
1909 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
1910 a8083063 Iustin Pop
          result = False
1911 a8083063 Iustin Pop
  return result
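
# Illustrative sketch, not part of the original module: the condition above
# means that a failed shutdown is tolerated only on the primary node and only
# when ignore_primary is set; spelled out as a standalone (hypothetical)
# predicate:
def _ExampleShutdownErrorIsFatal(node, instance, ignore_primary):
  """Return True if a failed shutdown on this node should fail the result."""
  return not ignore_primary or node != instance.primary_node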
1912 a8083063 Iustin Pop
1913 a8083063 Iustin Pop
1914 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
1915 a8083063 Iustin Pop
  """Starts an instance.
1916 a8083063 Iustin Pop

1917 a8083063 Iustin Pop
  """
1918 a8083063 Iustin Pop
  HPATH = "instance-start"
1919 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1920 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
1921 a8083063 Iustin Pop
1922 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1923 a8083063 Iustin Pop
    """Build hooks env.
1924 a8083063 Iustin Pop

1925 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1926 a8083063 Iustin Pop

1927 a8083063 Iustin Pop
    """
1928 a8083063 Iustin Pop
    env = {
1929 a8083063 Iustin Pop
      "FORCE": self.op.force,
1930 a8083063 Iustin Pop
      }
1931 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
1932 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1933 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1934 a8083063 Iustin Pop
    return env, nl, nl
1935 a8083063 Iustin Pop
1936 a8083063 Iustin Pop
  def CheckPrereq(self):
1937 a8083063 Iustin Pop
    """Check prerequisites.
1938 a8083063 Iustin Pop

1939 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1940 a8083063 Iustin Pop

1941 a8083063 Iustin Pop
    """
1942 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1943 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1944 a8083063 Iustin Pop
    if instance is None:
1945 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1946 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1947 a8083063 Iustin Pop
1948 a8083063 Iustin Pop
    # check bridges existence
1949 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
1950 a8083063 Iustin Pop
1951 a8083063 Iustin Pop
    self.instance = instance
1952 a8083063 Iustin Pop
    self.op.instance_name = instance.name
1953 a8083063 Iustin Pop
1954 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1955 a8083063 Iustin Pop
    """Start the instance.
1956 a8083063 Iustin Pop

1957 a8083063 Iustin Pop
    """
1958 a8083063 Iustin Pop
    instance = self.instance
1959 a8083063 Iustin Pop
    force = self.op.force
1960 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
1961 a8083063 Iustin Pop
1962 a8083063 Iustin Pop
    node_current = instance.primary_node
1963 a8083063 Iustin Pop
1964 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
1965 a8083063 Iustin Pop
    if not nodeinfo:
1966 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact node %s for infos" %
1967 3ecf6786 Iustin Pop
                               (node_current))
1968 a8083063 Iustin Pop
1969 a8083063 Iustin Pop
    freememory = nodeinfo[node_current]['memory_free']
1970 a8083063 Iustin Pop
    memory = instance.memory
1971 a8083063 Iustin Pop
    if memory > freememory:
1972 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to start instance"
1973 3ecf6786 Iustin Pop
                               " %s on node %s"
1974 3ecf6786 Iustin Pop
                               " needed %s MiB, available %s MiB" %
1975 3ecf6786 Iustin Pop
                               (instance.name, node_current, memory,
1976 3ecf6786 Iustin Pop
                                freememory))
1977 a8083063 Iustin Pop
1978 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
1979 a8083063 Iustin Pop
1980 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
1981 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
1982 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
1983 a8083063 Iustin Pop
1984 a8083063 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
1985 a8083063 Iustin Pop
1986 a8083063 Iustin Pop
1987 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
1988 bf6929a2 Alexander Schreiber
  """Reboot an instance.
1989 bf6929a2 Alexander Schreiber

1990 bf6929a2 Alexander Schreiber
  """
1991 bf6929a2 Alexander Schreiber
  HPATH = "instance-reboot"
1992 bf6929a2 Alexander Schreiber
  HTYPE = constants.HTYPE_INSTANCE
1993 bf6929a2 Alexander Schreiber
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]
1994 bf6929a2 Alexander Schreiber
1995 bf6929a2 Alexander Schreiber
  def BuildHooksEnv(self):
1996 bf6929a2 Alexander Schreiber
    """Build hooks env.
1997 bf6929a2 Alexander Schreiber

1998 bf6929a2 Alexander Schreiber
    This runs on master, primary and secondary nodes of the instance.
1999 bf6929a2 Alexander Schreiber

2000 bf6929a2 Alexander Schreiber
    """
2001 bf6929a2 Alexander Schreiber
    env = {
2002 bf6929a2 Alexander Schreiber
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
2003 bf6929a2 Alexander Schreiber
      }
2004 bf6929a2 Alexander Schreiber
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2005 bf6929a2 Alexander Schreiber
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2006 bf6929a2 Alexander Schreiber
          list(self.instance.secondary_nodes))
2007 bf6929a2 Alexander Schreiber
    return env, nl, nl
2008 bf6929a2 Alexander Schreiber
2009 bf6929a2 Alexander Schreiber
  def CheckPrereq(self):
2010 bf6929a2 Alexander Schreiber
    """Check prerequisites.
2011 bf6929a2 Alexander Schreiber

2012 bf6929a2 Alexander Schreiber
    This checks that the instance is in the cluster.
2013 bf6929a2 Alexander Schreiber

2014 bf6929a2 Alexander Schreiber
    """
2015 bf6929a2 Alexander Schreiber
    instance = self.cfg.GetInstanceInfo(
2016 bf6929a2 Alexander Schreiber
      self.cfg.ExpandInstanceName(self.op.instance_name))
2017 bf6929a2 Alexander Schreiber
    if instance is None:
2018 bf6929a2 Alexander Schreiber
      raise errors.OpPrereqError("Instance '%s' not known" %
2019 bf6929a2 Alexander Schreiber
                                 self.op.instance_name)
2020 bf6929a2 Alexander Schreiber
2021 bf6929a2 Alexander Schreiber
    # check bridges existence
2022 bf6929a2 Alexander Schreiber
    _CheckInstanceBridgesExist(instance)
2023 bf6929a2 Alexander Schreiber
2024 bf6929a2 Alexander Schreiber
    self.instance = instance
2025 bf6929a2 Alexander Schreiber
    self.op.instance_name = instance.name
2026 bf6929a2 Alexander Schreiber
2027 bf6929a2 Alexander Schreiber
  def Exec(self, feedback_fn):
2028 bf6929a2 Alexander Schreiber
    """Reboot the instance.
2029 bf6929a2 Alexander Schreiber

2030 bf6929a2 Alexander Schreiber
    """
2031 bf6929a2 Alexander Schreiber
    instance = self.instance
2032 bf6929a2 Alexander Schreiber
    ignore_secondaries = self.op.ignore_secondaries
2033 bf6929a2 Alexander Schreiber
    reboot_type = self.op.reboot_type
2034 bf6929a2 Alexander Schreiber
    extra_args = getattr(self.op, "extra_args", "")
2035 bf6929a2 Alexander Schreiber
2036 bf6929a2 Alexander Schreiber
    node_current = instance.primary_node
2037 bf6929a2 Alexander Schreiber
2038 bf6929a2 Alexander Schreiber
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
2039 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_HARD,
2040 bf6929a2 Alexander Schreiber
                           constants.INSTANCE_REBOOT_FULL]:
2041 bf6929a2 Alexander Schreiber
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
2042 bf6929a2 Alexander Schreiber
                                  (constants.INSTANCE_REBOOT_SOFT,
2043 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_HARD,
2044 bf6929a2 Alexander Schreiber
                                   constants.INSTANCE_REBOOT_FULL))
2045 bf6929a2 Alexander Schreiber
2046 bf6929a2 Alexander Schreiber
    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
2047 bf6929a2 Alexander Schreiber
                       constants.INSTANCE_REBOOT_HARD]:
2048 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_reboot(node_current, instance,
2049 bf6929a2 Alexander Schreiber
                                      reboot_type, extra_args):
2050 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not reboot instance")
2051 bf6929a2 Alexander Schreiber
    else:
2052 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_shutdown(node_current, instance):
2053 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("could not shutdown instance for full reboot")
2054 bf6929a2 Alexander Schreiber
      _ShutdownInstanceDisks(instance, self.cfg)
2055 bf6929a2 Alexander Schreiber
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
2056 bf6929a2 Alexander Schreiber
      if not rpc.call_instance_start(node_current, instance, extra_args):
2057 bf6929a2 Alexander Schreiber
        _ShutdownInstanceDisks(instance, self.cfg)
2058 bf6929a2 Alexander Schreiber
        raise errors.OpExecError("Could not start instance for full reboot")
2059 bf6929a2 Alexander Schreiber
2060 bf6929a2 Alexander Schreiber
    self.cfg.MarkInstanceUp(instance.name)
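    # Illustrative note: the three reboot types above map onto node-level
    # actions roughly as follows:
    #   INSTANCE_REBOOT_SOFT / INSTANCE_REBOOT_HARD
    #       -> rpc.call_instance_reboot on the primary node
    #   INSTANCE_REBOOT_FULL
    #       -> rpc.call_instance_shutdown, disk deactivation/reactivation,
    #          then rpc.call_instance_start on the primary node
    # A hypothetical client-side invocation (assuming the matching opcode in
    # opcodes.py carries these fields) could look like:
    #   op = opcodes.OpRebootInstance(instance_name="inst1.example.com",
    #                                 reboot_type=constants.INSTANCE_REBOOT_SOFT,
    #                                 ignore_secondaries=False)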
2061 bf6929a2 Alexander Schreiber
2062 bf6929a2 Alexander Schreiber
2063 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
2064 a8083063 Iustin Pop
  """Shutdown an instance.
2065 a8083063 Iustin Pop

2066 a8083063 Iustin Pop
  """
2067 a8083063 Iustin Pop
  HPATH = "instance-stop"
2068 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2069 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2070 a8083063 Iustin Pop
2071 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2072 a8083063 Iustin Pop
    """Build hooks env.
2073 a8083063 Iustin Pop

2074 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2075 a8083063 Iustin Pop

2076 a8083063 Iustin Pop
    """
2077 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2078 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2079 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2080 a8083063 Iustin Pop
    return env, nl, nl
2081 a8083063 Iustin Pop
2082 a8083063 Iustin Pop
  def CheckPrereq(self):
2083 a8083063 Iustin Pop
    """Check prerequisites.
2084 a8083063 Iustin Pop

2085 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2086 a8083063 Iustin Pop

2087 a8083063 Iustin Pop
    """
2088 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2089 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2090 a8083063 Iustin Pop
    if instance is None:
2091 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2092 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2093 a8083063 Iustin Pop
    self.instance = instance
2094 a8083063 Iustin Pop
2095 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2096 a8083063 Iustin Pop
    """Shutdown the instance.
2097 a8083063 Iustin Pop

2098 a8083063 Iustin Pop
    """
2099 a8083063 Iustin Pop
    instance = self.instance
2100 a8083063 Iustin Pop
    node_current = instance.primary_node
2101 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2102 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2103 a8083063 Iustin Pop
2104 a8083063 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2105 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2106 a8083063 Iustin Pop
2107 a8083063 Iustin Pop
2108 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2109 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2110 fe7b0351 Michael Hanselmann

2111 fe7b0351 Michael Hanselmann
  """
2112 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2113 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2114 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2115 fe7b0351 Michael Hanselmann
2116 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2117 fe7b0351 Michael Hanselmann
    """Build hooks env.
2118 fe7b0351 Michael Hanselmann

2119 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2120 fe7b0351 Michael Hanselmann

2121 fe7b0351 Michael Hanselmann
    """
2122 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2123 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2124 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2125 fe7b0351 Michael Hanselmann
    return env, nl, nl
2126 fe7b0351 Michael Hanselmann
2127 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2128 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2129 fe7b0351 Michael Hanselmann

2130 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2131 fe7b0351 Michael Hanselmann

2132 fe7b0351 Michael Hanselmann
    """
2133 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2134 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2135 fe7b0351 Michael Hanselmann
    if instance is None:
2136 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2137 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2138 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2139 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2140 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2141 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2142 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2143 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2144 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2145 fe7b0351 Michael Hanselmann
    if remote_info:
2146 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2147 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2148 3ecf6786 Iustin Pop
                                  instance.primary_node))
2149 d0834de3 Michael Hanselmann
2150 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2151 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2152 d0834de3 Michael Hanselmann
      # OS verification
2153 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2154 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2155 d0834de3 Michael Hanselmann
      if pnode is None:
2156 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2157 3ecf6786 Iustin Pop
                                   instance.primary_node)
2158 d0834de3 Michael Hanselmann
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2159 d0834de3 Michael Hanselmann
      if not isinstance(os_obj, objects.OS):
2160 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2161 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2162 d0834de3 Michael Hanselmann
2163 fe7b0351 Michael Hanselmann
    self.instance = instance
2164 fe7b0351 Michael Hanselmann
2165 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2166 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2167 fe7b0351 Michael Hanselmann

2168 fe7b0351 Michael Hanselmann
    """
2169 fe7b0351 Michael Hanselmann
    inst = self.instance
2170 fe7b0351 Michael Hanselmann
2171 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2172 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2173 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2174 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2175 d0834de3 Michael Hanselmann
2176 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2177 fe7b0351 Michael Hanselmann
    try:
2178 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2179 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2180 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not install OS for instance %s "
2181 3ecf6786 Iustin Pop
                                 "on node %s" %
2182 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2183 fe7b0351 Michael Hanselmann
    finally:
2184 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2185 fe7b0351 Michael Hanselmann
2186 fe7b0351 Michael Hanselmann
2187 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2188 decd5f45 Iustin Pop
  """Rename an instance.
2189 decd5f45 Iustin Pop

2190 decd5f45 Iustin Pop
  """
2191 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2192 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2193 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2194 decd5f45 Iustin Pop
2195 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2196 decd5f45 Iustin Pop
    """Build hooks env.
2197 decd5f45 Iustin Pop

2198 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2199 decd5f45 Iustin Pop

2200 decd5f45 Iustin Pop
    """
2201 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2202 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2203 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2204 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2205 decd5f45 Iustin Pop
    return env, nl, nl
2206 decd5f45 Iustin Pop
2207 decd5f45 Iustin Pop
  def CheckPrereq(self):
2208 decd5f45 Iustin Pop
    """Check prerequisites.
2209 decd5f45 Iustin Pop

2210 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2211 decd5f45 Iustin Pop

2212 decd5f45 Iustin Pop
    """
2213 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2214 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2215 decd5f45 Iustin Pop
    if instance is None:
2216 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2217 decd5f45 Iustin Pop
                                 self.op.instance_name)
2218 decd5f45 Iustin Pop
    if instance.status != "down":
2219 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2220 decd5f45 Iustin Pop
                                 self.op.instance_name)
2221 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2222 decd5f45 Iustin Pop
    if remote_info:
2223 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2224 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2225 decd5f45 Iustin Pop
                                  instance.primary_node))
2226 decd5f45 Iustin Pop
    self.instance = instance
2227 decd5f45 Iustin Pop
2228 decd5f45 Iustin Pop
    # new name verification
2229 89e1fc26 Iustin Pop
    name_info = utils.HostInfo(self.op.new_name)
2230 decd5f45 Iustin Pop
2231 89e1fc26 Iustin Pop
    self.op.new_name = new_name = name_info.name
2232 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2233 89e1fc26 Iustin Pop
      command = ["fping", "-q", name_info.ip]
2234 decd5f45 Iustin Pop
      result = utils.RunCmd(command)
2235 decd5f45 Iustin Pop
      if not result.failed:
2236 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2237 89e1fc26 Iustin Pop
                                   (name_info.ip, new_name))
2238 decd5f45 Iustin Pop
2239 decd5f45 Iustin Pop
2240 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2241 decd5f45 Iustin Pop
    """Reinstall the instance.
2242 decd5f45 Iustin Pop

2243 decd5f45 Iustin Pop
    """
2244 decd5f45 Iustin Pop
    inst = self.instance
2245 decd5f45 Iustin Pop
    old_name = inst.name
2246 decd5f45 Iustin Pop
2247 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2248 decd5f45 Iustin Pop
2249 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2250 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2251 decd5f45 Iustin Pop
2252 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2253 decd5f45 Iustin Pop
    try:
2254 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2255 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2256 decd5f45 Iustin Pop
        msg = ("Could run OS rename script for instance %s\n"
2257 decd5f45 Iustin Pop
               "on node %s\n"
2258 decd5f45 Iustin Pop
               "(but the instance has been renamed in Ganeti)" %
2259 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2260 decd5f45 Iustin Pop
        logger.Error(msg)
2261 decd5f45 Iustin Pop
    finally:
2262 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2263 decd5f45 Iustin Pop
2264 decd5f45 Iustin Pop
2265 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2266 a8083063 Iustin Pop
  """Remove an instance.
2267 a8083063 Iustin Pop

2268 a8083063 Iustin Pop
  """
2269 a8083063 Iustin Pop
  HPATH = "instance-remove"
2270 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2271 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2272 a8083063 Iustin Pop
2273 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2274 a8083063 Iustin Pop
    """Build hooks env.
2275 a8083063 Iustin Pop

2276 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2277 a8083063 Iustin Pop

2278 a8083063 Iustin Pop
    """
2279 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2280 1d67656e Iustin Pop
    nl = [self.sstore.GetMasterNode()]
2281 a8083063 Iustin Pop
    return env, nl, nl
2282 a8083063 Iustin Pop
2283 a8083063 Iustin Pop
  def CheckPrereq(self):
2284 a8083063 Iustin Pop
    """Check prerequisites.
2285 a8083063 Iustin Pop

2286 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2287 a8083063 Iustin Pop

2288 a8083063 Iustin Pop
    """
2289 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2290 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2291 a8083063 Iustin Pop
    if instance is None:
2292 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2293 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2294 a8083063 Iustin Pop
    self.instance = instance
2295 a8083063 Iustin Pop
2296 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2297 a8083063 Iustin Pop
    """Remove the instance.
2298 a8083063 Iustin Pop

2299 a8083063 Iustin Pop
    """
2300 a8083063 Iustin Pop
    instance = self.instance
2301 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2302 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2303 a8083063 Iustin Pop
2304 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2305 1d67656e Iustin Pop
      if self.op.ignore_failures:
2306 1d67656e Iustin Pop
        feedback_fn("Warning: can't shutdown instance")
2307 1d67656e Iustin Pop
      else:
2308 1d67656e Iustin Pop
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2309 1d67656e Iustin Pop
                                 (instance.name, instance.primary_node))
2310 a8083063 Iustin Pop
2311 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2312 a8083063 Iustin Pop
2313 1d67656e Iustin Pop
    if not _RemoveDisks(instance, self.cfg):
2314 1d67656e Iustin Pop
      if self.op.ignore_failures:
2315 1d67656e Iustin Pop
        feedback_fn("Warning: can't remove instance's disks")
2316 1d67656e Iustin Pop
      else:
2317 1d67656e Iustin Pop
        raise errors.OpExecError("Can't remove instance's disks")
2318 a8083063 Iustin Pop
2319 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2320 a8083063 Iustin Pop
2321 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2322 a8083063 Iustin Pop
2323 a8083063 Iustin Pop
2324 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2325 a8083063 Iustin Pop
  """Logical unit for querying instances.
2326 a8083063 Iustin Pop

2327 a8083063 Iustin Pop
  """
2328 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2329 a8083063 Iustin Pop
2330 a8083063 Iustin Pop
  def CheckPrereq(self):
2331 a8083063 Iustin Pop
    """Check prerequisites.
2332 a8083063 Iustin Pop

2333 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2334 a8083063 Iustin Pop

2335 a8083063 Iustin Pop
    """
2336 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
2337 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2338 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2339 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2340 644eeef9 Iustin Pop
                               "sda_size", "sdb_size"],
2341 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2342 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2343 a8083063 Iustin Pop
2344 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2345 069dcc86 Iustin Pop
2346 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2347 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2348 a8083063 Iustin Pop

2349 a8083063 Iustin Pop
    """
2350 069dcc86 Iustin Pop
    instance_names = self.wanted
2351 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2352 a8083063 Iustin Pop
                     in instance_names]
2353 a8083063 Iustin Pop
2354 a8083063 Iustin Pop
    # begin data gathering
2355 a8083063 Iustin Pop
2356 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2357 a8083063 Iustin Pop
2358 a8083063 Iustin Pop
    bad_nodes = []
2359 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2360 a8083063 Iustin Pop
      live_data = {}
2361 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2362 a8083063 Iustin Pop
      for name in nodes:
2363 a8083063 Iustin Pop
        result = node_data[name]
2364 a8083063 Iustin Pop
        if result:
2365 a8083063 Iustin Pop
          live_data.update(result)
2366 a8083063 Iustin Pop
        elif result == False:
2367 a8083063 Iustin Pop
          bad_nodes.append(name)
2368 a8083063 Iustin Pop
        # else no instance is alive
2369 a8083063 Iustin Pop
    else:
2370 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2371 a8083063 Iustin Pop
2372 a8083063 Iustin Pop
    # end data gathering
2373 a8083063 Iustin Pop
2374 a8083063 Iustin Pop
    output = []
2375 a8083063 Iustin Pop
    for instance in instance_list:
2376 a8083063 Iustin Pop
      iout = []
2377 a8083063 Iustin Pop
      for field in self.op.output_fields:
2378 a8083063 Iustin Pop
        if field == "name":
2379 a8083063 Iustin Pop
          val = instance.name
2380 a8083063 Iustin Pop
        elif field == "os":
2381 a8083063 Iustin Pop
          val = instance.os
2382 a8083063 Iustin Pop
        elif field == "pnode":
2383 a8083063 Iustin Pop
          val = instance.primary_node
2384 a8083063 Iustin Pop
        elif field == "snodes":
2385 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2386 a8083063 Iustin Pop
        elif field == "admin_state":
2387 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2388 a8083063 Iustin Pop
        elif field == "oper_state":
2389 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2390 8a23d2d3 Iustin Pop
            val = None
2391 a8083063 Iustin Pop
          else:
2392 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2393 a8083063 Iustin Pop
        elif field == "admin_ram":
2394 a8083063 Iustin Pop
          val = instance.memory
2395 a8083063 Iustin Pop
        elif field == "oper_ram":
2396 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2397 8a23d2d3 Iustin Pop
            val = None
2398 a8083063 Iustin Pop
          elif instance.name in live_data:
2399 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2400 a8083063 Iustin Pop
          else:
2401 a8083063 Iustin Pop
            val = "-"
2402 a8083063 Iustin Pop
        elif field == "disk_template":
2403 a8083063 Iustin Pop
          val = instance.disk_template
2404 a8083063 Iustin Pop
        elif field == "ip":
2405 a8083063 Iustin Pop
          val = instance.nics[0].ip
2406 a8083063 Iustin Pop
        elif field == "bridge":
2407 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2408 a8083063 Iustin Pop
        elif field == "mac":
2409 a8083063 Iustin Pop
          val = instance.nics[0].mac
2410 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2411 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2412 644eeef9 Iustin Pop
          if disk is None:
2413 8a23d2d3 Iustin Pop
            val = None
2414 644eeef9 Iustin Pop
          else:
2415 644eeef9 Iustin Pop
            val = disk.size
2416 a8083063 Iustin Pop
        else:
2417 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2418 a8083063 Iustin Pop
        iout.append(val)
2419 a8083063 Iustin Pop
      output.append(iout)
2420 a8083063 Iustin Pop
2421 a8083063 Iustin Pop
    return output
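    # Illustrative example (hypothetical values): with
    #   output_fields = ["name", "pnode", "admin_state", "oper_ram"]
    # each entry of the returned list would look like
    #   ["inst1.example.com", "node1.example.com", True, 512]
    # where "oper_ram" comes from the live data gathered above and degrades to
    # None (primary node unreachable) or "-" (instance not running).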
2422 a8083063 Iustin Pop
2423 a8083063 Iustin Pop
2424 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2425 a8083063 Iustin Pop
  """Failover an instance.
2426 a8083063 Iustin Pop

2427 a8083063 Iustin Pop
  """
2428 a8083063 Iustin Pop
  HPATH = "instance-failover"
2429 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2430 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2431 a8083063 Iustin Pop
2432 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2433 a8083063 Iustin Pop
    """Build hooks env.
2434 a8083063 Iustin Pop

2435 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2436 a8083063 Iustin Pop

2437 a8083063 Iustin Pop
    """
2438 a8083063 Iustin Pop
    env = {
2439 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2440 a8083063 Iustin Pop
      }
2441 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2442 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2443 a8083063 Iustin Pop
    return env, nl, nl
2444 a8083063 Iustin Pop
2445 a8083063 Iustin Pop
  def CheckPrereq(self):
2446 a8083063 Iustin Pop
    """Check prerequisites.
2447 a8083063 Iustin Pop

2448 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2449 a8083063 Iustin Pop

2450 a8083063 Iustin Pop
    """
2451 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2452 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2453 a8083063 Iustin Pop
    if instance is None:
2454 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2455 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2456 a8083063 Iustin Pop
2457 2a710df1 Michael Hanselmann
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2458 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2459 2a710df1 Michael Hanselmann
                                 " remote_raid1.")
2460 2a710df1 Michael Hanselmann
2461 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2462 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2463 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2464 2a710df1 Michael Hanselmann
                                   "DT_REMOTE_RAID1 template")
2465 2a710df1 Michael Hanselmann
2466 3a7c308e Guido Trotter
    # check memory requirements on the secondary node
2467 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2468 3a7c308e Guido Trotter
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2469 3a7c308e Guido Trotter
    info = nodeinfo.get(target_node, None)
2470 3a7c308e Guido Trotter
    if not info:
2471 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
2472 3ecf6786 Iustin Pop
                                 " from node '%s'" % nodeinfo)
2473 3a7c308e Guido Trotter
    if instance.memory > info['memory_free']:
2474 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Not enough memory on target node %s."
2475 3ecf6786 Iustin Pop
                                 " %d MB available, %d MB required" %
2476 3ecf6786 Iustin Pop
                                 (target_node, info['memory_free'],
2477 3ecf6786 Iustin Pop
                                  instance.memory))
2478 3a7c308e Guido Trotter
2479 a8083063 Iustin Pop
    # check bridge existence
2480 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2481 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
2482 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2483 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2484 3ecf6786 Iustin Pop
                                 (brlist, instance.primary_node))
2485 a8083063 Iustin Pop
2486 a8083063 Iustin Pop
    self.instance = instance
2487 a8083063 Iustin Pop
2488 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2489 a8083063 Iustin Pop
    """Failover an instance.
2490 a8083063 Iustin Pop

2491 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2492 a8083063 Iustin Pop
    starting it on the secondary.
2493 a8083063 Iustin Pop

2494 a8083063 Iustin Pop
    """
2495 a8083063 Iustin Pop
    instance = self.instance
2496 a8083063 Iustin Pop
2497 a8083063 Iustin Pop
    source_node = instance.primary_node
2498 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2499 a8083063 Iustin Pop
2500 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2501 a8083063 Iustin Pop
    for dev in instance.disks:
2502 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2503 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2504 a8083063 Iustin Pop
        if not self.op.ignore_consistency:
2505 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2506 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2507 a8083063 Iustin Pop
2508 a8083063 Iustin Pop
    feedback_fn("* checking target node resource availability")
2509 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2510 a8083063 Iustin Pop
2511 a8083063 Iustin Pop
    if not nodeinfo:
2512 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact target node %s." %
2513 3ecf6786 Iustin Pop
                               target_node)
2514 a8083063 Iustin Pop
2515 a8083063 Iustin Pop
    free_memory = int(nodeinfo[target_node]['memory_free'])
2516 a8083063 Iustin Pop
    memory = instance.memory
2517 a8083063 Iustin Pop
    if memory > free_memory:
2518 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to create instance %s on"
2519 3ecf6786 Iustin Pop
                               " node %s. needed %s MiB, available %s MiB" %
2520 3ecf6786 Iustin Pop
                               (instance.name, target_node, memory,
2521 3ecf6786 Iustin Pop
                                free_memory))
2522 a8083063 Iustin Pop
2523 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2524 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2525 a8083063 Iustin Pop
                (instance.name, source_node))
2526 a8083063 Iustin Pop
2527 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2528 a8083063 Iustin Pop
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2529 a8083063 Iustin Pop
                   " anyway. Please make sure node %s is down"  %
2530 a8083063 Iustin Pop
                   (instance.name, source_node, source_node))
2531 a8083063 Iustin Pop
2532 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2533 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2534 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2535 a8083063 Iustin Pop
2536 a8083063 Iustin Pop
    instance.primary_node = target_node
2537 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2538 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2539 a8083063 Iustin Pop
2540 a8083063 Iustin Pop
    feedback_fn("* activating the instance's disks on target node")
2541 a8083063 Iustin Pop
    logger.Info("Starting instance %s on node %s" %
2542 a8083063 Iustin Pop
                (instance.name, target_node))
2543 a8083063 Iustin Pop
2544 a8083063 Iustin Pop
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2545 a8083063 Iustin Pop
                                             ignore_secondaries=True)
2546 a8083063 Iustin Pop
    if not disks_ok:
2547 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2548 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't activate the instance's disks")
2549 a8083063 Iustin Pop
2550 a8083063 Iustin Pop
    feedback_fn("* starting the instance on the target node")
2551 a8083063 Iustin Pop
    if not rpc.call_instance_start(target_node, instance, None):
2552 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2553 a8083063 Iustin Pop
      raise errors.OpExecError("Could not start instance %s on node %s." %
2554 d0b3526f Michael Hanselmann
                               (instance.name, target_node))
2555 a8083063 Iustin Pop
2556 a8083063 Iustin Pop
2557 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnPrimary(cfg, node, device, info):
2558 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2559 a8083063 Iustin Pop

2560 a8083063 Iustin Pop
  This always creates all devices.
2561 a8083063 Iustin Pop

2562 a8083063 Iustin Pop
  """
2563 a8083063 Iustin Pop
  if device.children:
2564 a8083063 Iustin Pop
    for child in device.children:
2565 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, node, child, info):
2566 a8083063 Iustin Pop
        return False
2567 a8083063 Iustin Pop
2568 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2569 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
2570 a8083063 Iustin Pop
  if not new_id:
2571 a8083063 Iustin Pop
    return False
2572 a8083063 Iustin Pop
  if device.physical_id is None:
2573 a8083063 Iustin Pop
    device.physical_id = new_id
2574 a8083063 Iustin Pop
  return True
2575 a8083063 Iustin Pop
2576 a8083063 Iustin Pop
2577 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
2578 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2579 a8083063 Iustin Pop

2580 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2581 a8083063 Iustin Pop
  all its children.
2582 a8083063 Iustin Pop

2583 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2584 a8083063 Iustin Pop

2585 a8083063 Iustin Pop
  """
2586 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2587 a8083063 Iustin Pop
    force = True
2588 a8083063 Iustin Pop
  if device.children:
2589 a8083063 Iustin Pop
    for child in device.children:
2590 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, node, child, force, info):
2591 a8083063 Iustin Pop
        return False
2592 a8083063 Iustin Pop
2593 a8083063 Iustin Pop
  if not force:
2594 a8083063 Iustin Pop
    return True
2595 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2596 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
2597 a8083063 Iustin Pop
  if not new_id:
2598 a8083063 Iustin Pop
    return False
2599 a8083063 Iustin Pop
  if device.physical_id is None:
2600 a8083063 Iustin Pop
    device.physical_id = new_id
2601 a8083063 Iustin Pop
  return True
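# Note on the two helpers above: both walk the disk tree depth-first, creating
# children before their parent device.  On the primary node everything is
# created unconditionally; on a secondary node creation only starts once a
# device in the chain answers CreateOnSecondary() with True, at which point
# 'force' is switched on and propagated to all devices below it.  For a
# remote_raid1 layout this typically means the secondary ends up with the DRBD
# device and its backing logical volumes, but not the purely local md device.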
2602 a8083063 Iustin Pop
2603 a8083063 Iustin Pop
2604 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2605 923b1523 Iustin Pop
  """Generate a suitable LV name.
2606 923b1523 Iustin Pop

2607 923b1523 Iustin Pop
  This will generate a set of unique logical volume names, one for each of
  the given extensions.
2608 923b1523 Iustin Pop

2609 923b1523 Iustin Pop
  """
2610 923b1523 Iustin Pop
  results = []
2611 923b1523 Iustin Pop
  for val in exts:
2612 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2613 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2614 923b1523 Iustin Pop
  return results
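# Illustrative example (hypothetical IDs): a call such as
#   _GenerateUniqueNames(cfg, [".sda", ".sdb"])
# returns one name per extension, e.g.
#   ["9e7ac5b0-1d2e.sda", "61f2d3c4-7a8b.sdb"]
# where each prefix is a separate value obtained from cfg.GenerateUniqueID().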
2615 923b1523 Iustin Pop
2616 923b1523 Iustin Pop
2617 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
2618 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
2619 a8083063 Iustin Pop

2620 a8083063 Iustin Pop
  """
2621 a8083063 Iustin Pop
  port = cfg.AllocatePort()
2622 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2623 a8083063 Iustin Pop
  dev_data = objects.Disk(dev_type="lvm", size=size,
2624 923b1523 Iustin Pop
                          logical_id=(vgname, names[0]))
2625 a8083063 Iustin Pop
  dev_meta = objects.Disk(dev_type="lvm", size=128,
2626 923b1523 Iustin Pop
                          logical_id=(vgname, names[1]))
2627 a8083063 Iustin Pop
  drbd_dev = objects.Disk(dev_type="drbd", size=size,
2628 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
2629 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
2630 a8083063 Iustin Pop
  return drbd_dev
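# Sketch of the structure returned above, for disk_sz=10240 and
# names=["<id>.sda_data", "<id>.sda_meta"] (placeholders, not real values):
#
#   Disk(dev_type="drbd", size=10240,
#        logical_id=(primary, secondary, <allocated port>),
#        children=[Disk(dev_type="lvm", size=10240,
#                       logical_id=(<vg>, "<id>.sda_data")),
#                  Disk(dev_type="lvm", size=128,
#                       logical_id=(<vg>, "<id>.sda_meta"))])
#
# i.e. a DRBD device backed by a data LV of the requested size plus a fixed
# 128 MB metadata LV.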
2631 a8083063 Iustin Pop
2632 a8083063 Iustin Pop
2633 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2634 a8083063 Iustin Pop
                          instance_name, primary_node,
2635 a8083063 Iustin Pop
                          secondary_nodes, disk_sz, swap_sz):
2636 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2637 a8083063 Iustin Pop

2638 a8083063 Iustin Pop
  """
2639 a8083063 Iustin Pop
  #TODO: compute space requirements
2640 a8083063 Iustin Pop
2641 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2642 a8083063 Iustin Pop
  if template_name == "diskless":
2643 a8083063 Iustin Pop
    disks = []
2644 a8083063 Iustin Pop
  elif template_name == "plain":
2645 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2646 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2647 923b1523 Iustin Pop
2648 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2649 a8083063 Iustin Pop
    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
2650 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2651 a8083063 Iustin Pop
                           iv_name = "sda")
2652 a8083063 Iustin Pop
    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
2653 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2654 a8083063 Iustin Pop
                           iv_name = "sdb")
2655 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2656 a8083063 Iustin Pop
  elif template_name == "local_raid1":
2657 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2658 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2659 923b1523 Iustin Pop
2660 923b1523 Iustin Pop
2661 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
2662 923b1523 Iustin Pop
                                       ".sdb_m1", ".sdb_m2"])
2663 a8083063 Iustin Pop
    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
2664 923b1523 Iustin Pop
                              logical_id=(vgname, names[0]))
2665 a8083063 Iustin Pop
    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
2666 923b1523 Iustin Pop
                              logical_id=(vgname, names[1]))
2667 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
2668 a8083063 Iustin Pop
                              size=disk_sz,
2669 a8083063 Iustin Pop
                              children = [sda_dev_m1, sda_dev_m2])
2670 a8083063 Iustin Pop
    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
2671 923b1523 Iustin Pop
                              logical_id=(vgname, names[2]))
2672 a8083063 Iustin Pop
    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
2673 923b1523 Iustin Pop
                              logical_id=(vgname, names[3]))
2674 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
2675 a8083063 Iustin Pop
                              size=swap_sz,
2676 a8083063 Iustin Pop
                              children = [sdb_dev_m1, sdb_dev_m2])
2677 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2678 2a710df1 Michael Hanselmann
  elif template_name == constants.DT_REMOTE_RAID1:
2679 a8083063 Iustin Pop
    if len(secondary_nodes) != 1:
2680 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2681 a8083063 Iustin Pop
    remote_node = secondary_nodes[0]
2682 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2683 923b1523 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2684 923b1523 Iustin Pop
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2685 923b1523 Iustin Pop
                                         disk_sz, names[0:2])
2686 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
2687 a8083063 Iustin Pop
                              children = [drbd_sda_dev], size=disk_sz)
2688 923b1523 Iustin Pop
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2689 923b1523 Iustin Pop
                                         swap_sz, names[2:4])
2690 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
2691 a8083063 Iustin Pop
                              children = [drbd_sdb_dev], size=swap_sz)
2692 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2693 a8083063 Iustin Pop
  else:
2694 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2695 a8083063 Iustin Pop
  return disks
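# Quick reference for the templates handled above (sda = data disk of size
# disk_sz, sdb = swap disk of size swap_sz):
#   diskless     -> no disks at all
#   plain        -> two plain LVs (sda, sdb) on the primary node
#   local_raid1  -> two md_raid1 devices, each mirroring a pair of local LVs
#   remote_raid1 -> two md_raid1 devices, each wrapping a DRBD device that
#                   mirrors an LV pair between the primary and the single
#                   secondary node (see _GenerateMDDRBDBranch above)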
2696 a8083063 Iustin Pop
2697 a8083063 Iustin Pop
2698 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2699 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2700 3ecf6786 Iustin Pop

2701 3ecf6786 Iustin Pop
  """
2702 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2703 a0c3fea1 Michael Hanselmann
2704 a0c3fea1 Michael Hanselmann
2705 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2706 a8083063 Iustin Pop
  """Create all disks for an instance.
2707 a8083063 Iustin Pop

2708 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2709 a8083063 Iustin Pop

2710 a8083063 Iustin Pop
  Args:
2711 a8083063 Iustin Pop
    instance: the instance object
2712 a8083063 Iustin Pop

2713 a8083063 Iustin Pop
  Returns:
2714 a8083063 Iustin Pop
    True or False showing the success of the creation process
2715 a8083063 Iustin Pop

2716 a8083063 Iustin Pop
  """
2717 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2718 a0c3fea1 Michael Hanselmann
2719 a8083063 Iustin Pop
  for device in instance.disks:
2720 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2721 a8083063 Iustin Pop
              (device.iv_name, instance.name))
2722 a8083063 Iustin Pop
    #HARDCODE
2723 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2724 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False,
2725 a0c3fea1 Michael Hanselmann
                                        info):
2726 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2727 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2728 a8083063 Iustin Pop
        return False
2729 a8083063 Iustin Pop
    #HARDCODE
2730 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info):
2731 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2732 a8083063 Iustin Pop
                   device.iv_name)
2733 a8083063 Iustin Pop
      return False
2734 a8083063 Iustin Pop
  return True
2735 a8083063 Iustin Pop
2736 a8083063 Iustin Pop
2737 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2738 a8083063 Iustin Pop
  """Remove all disks for an instance.
2739 a8083063 Iustin Pop

2740 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2741 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2742 1d67656e Iustin Pop
  be removed, the removal will continue with the other ones (compare
2743 a8083063 Iustin Pop
  with `_CreateDisks()`).
2744 a8083063 Iustin Pop

2745 a8083063 Iustin Pop
  Args:
2746 a8083063 Iustin Pop
    instance: the instance object
2747 a8083063 Iustin Pop

2748 a8083063 Iustin Pop
  Returns:
2749 a8083063 Iustin Pop
    True or False showing the success of the removal process
2750 a8083063 Iustin Pop

2751 a8083063 Iustin Pop
  """
2752 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2753 a8083063 Iustin Pop
2754 a8083063 Iustin Pop
  result = True
2755 a8083063 Iustin Pop
  for device in instance.disks:
2756 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2757 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2758 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2759 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2760 a8083063 Iustin Pop
                     " continuing anyway" %
2761 a8083063 Iustin Pop
                     (device.iv_name, node))
2762 a8083063 Iustin Pop
        result = False
2763 a8083063 Iustin Pop
  return result
2764 a8083063 Iustin Pop
2765 a8083063 Iustin Pop
2766 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2767 a8083063 Iustin Pop
  """Create an instance.
2768 a8083063 Iustin Pop

2769 a8083063 Iustin Pop
  """
2770 a8083063 Iustin Pop
  HPATH = "instance-add"
2771 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2772 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
2773 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2774 bdd55f71 Iustin Pop
              "wait_for_sync", "ip_check"]
2775 a8083063 Iustin Pop
2776 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2777 a8083063 Iustin Pop
    """Build hooks env.
2778 a8083063 Iustin Pop

2779 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2780 a8083063 Iustin Pop

2781 a8083063 Iustin Pop
    """
2782 a8083063 Iustin Pop
    env = {
2783 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2784 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2785 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2786 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2787 a8083063 Iustin Pop
      }
2788 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2789 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2790 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2791 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2792 396e1b78 Michael Hanselmann
2793 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
2794 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
2795 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
2796 396e1b78 Michael Hanselmann
      status=self.instance_status,
2797 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
2798 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
2799 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
2800 396e1b78 Michael Hanselmann
      nics=[(self.inst_ip, self.op.bridge)],
2801 396e1b78 Michael Hanselmann
    ))
2802 a8083063 Iustin Pop
2803 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2804 a8083063 Iustin Pop
          self.secondaries)
2805 a8083063 Iustin Pop
    return env, nl, nl
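    # Sketch of the resulting hook environment (hypothetical values): for a
    # 10 GB "plain" instance created from scratch, 'env' contains entries like
    #   INSTANCE_DISK_TEMPLATE=plain, INSTANCE_DISK_SIZE=10240,
    #   INSTANCE_SWAP_SIZE=4096, INSTANCE_ADD_MODE=<constants.INSTANCE_CREATE>
    # plus the per-instance keys contributed by _BuildInstanceHookEnv (name,
    # primary/secondary nodes, status, OS type, memory, vcpus, NICs); the
    # INSTANCE_SRC_* keys are only present in import mode.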
2806 a8083063 Iustin Pop
2807 a8083063 Iustin Pop
2808 a8083063 Iustin Pop
  def CheckPrereq(self):
2809 a8083063 Iustin Pop
    """Check prerequisites.
2810 a8083063 Iustin Pop

2811 a8083063 Iustin Pop
    """
2812 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2813 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2814 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
2815 3ecf6786 Iustin Pop
                                 self.op.mode)
2816 a8083063 Iustin Pop
2817 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2818 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2819 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2820 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2821 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
2822 3ecf6786 Iustin Pop
                                   " node and path options")
2823 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2824 a8083063 Iustin Pop
      if src_node_full is None:
2825 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
2826 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2827 a8083063 Iustin Pop
2828 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2829 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
2830 a8083063 Iustin Pop
2831 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2832 a8083063 Iustin Pop
2833 a8083063 Iustin Pop
      if not export_info:
2834 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
2835 a8083063 Iustin Pop
2836 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2837 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
2838 a8083063 Iustin Pop
2839 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
2840 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
2841 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
2842 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
2843 a8083063 Iustin Pop
2844 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
2845 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
2846 3ecf6786 Iustin Pop
                                   " one data disk")
2847 a8083063 Iustin Pop
2848 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
2849 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
2850 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
2851 a8083063 Iustin Pop
                                                         'disk0_dump'))
2852 a8083063 Iustin Pop
      self.src_image = diskimage
2853 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
2854 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
2855 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
2856 a8083063 Iustin Pop
2857 a8083063 Iustin Pop
    # check primary node
2858 a8083063 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
2859 a8083063 Iustin Pop
    if pnode is None:
2860 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
2861 3ecf6786 Iustin Pop
                                 self.op.pnode)
2862 a8083063 Iustin Pop
    self.op.pnode = pnode.name
2863 a8083063 Iustin Pop
    self.pnode = pnode
2864 a8083063 Iustin Pop
    self.secondaries = []
2865 a8083063 Iustin Pop
    # disk template and mirror node verification
2866 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
2867 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
2868 a8083063 Iustin Pop
2869 a8083063 Iustin Pop
    if self.op.disk_template == constants.DT_REMOTE_RAID1:
2870 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
2871 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
2872 3ecf6786 Iustin Pop
                                   " a mirror node")
2873 a8083063 Iustin Pop
2874 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
2875 a8083063 Iustin Pop
      if snode_name is None:
2876 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
2877 3ecf6786 Iustin Pop
                                   self.op.snode)
2878 a8083063 Iustin Pop
      elif snode_name == pnode.name:
2879 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
2880 3ecf6786 Iustin Pop
                                   " the primary node.")
2881 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
2882 a8083063 Iustin Pop
2883 ed1ebc60 Guido Trotter
    # Check lv size requirements
2884 ed1ebc60 Guido Trotter
    nodenames = [pnode.name] + self.secondaries
2885 ed1ebc60 Guido Trotter
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
2886 ed1ebc60 Guido Trotter
2887 ed1ebc60 Guido Trotter
    # Required free disk space as a function of disk and swap space
2888 ed1ebc60 Guido Trotter
    req_size_dict = {
2889 ed1ebc60 Guido Trotter
      constants.DT_DISKLESS: 0,
2890 ed1ebc60 Guido Trotter
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
2891 ed1ebc60 Guido Trotter
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
2892 ed1ebc60 Guido Trotter
      # 256 MB are added for drbd metadata, 128 MB for each drbd device
2893 ed1ebc60 Guido Trotter
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
2894 ed1ebc60 Guido Trotter
    }
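    # Worked example (hypothetical sizes): with disk_size=10240 and
    # swap_size=4096 the required free space per node evaluates to
    #   plain        -> 10240 + 4096       = 14336 MB
    #   local_raid1  -> (10240 + 4096) * 2 = 28672 MB
    #   remote_raid1 -> 10240 + 4096 + 256 = 14592 MB
    # while diskless needs no volume group space at all.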
2895 ed1ebc60 Guido Trotter
2896 ed1ebc60 Guido Trotter
    if self.op.disk_template not in req_size_dict:
2897 3ecf6786 Iustin Pop
      raise errors.ProgrammerError("Disk template '%s' size requirement"
2898 3ecf6786 Iustin Pop
                                   " is unknown" %  self.op.disk_template)
2899 ed1ebc60 Guido Trotter
2900 ed1ebc60 Guido Trotter
    req_size = req_size_dict[self.op.disk_template]
2901 ed1ebc60 Guido Trotter
2902 ed1ebc60 Guido Trotter
    for node in nodenames:
2903 ed1ebc60 Guido Trotter
      info = nodeinfo.get(node, None)
2904 ed1ebc60 Guido Trotter
      if not info:
2905 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
2906 3ecf6786 Iustin Pop
                                   " from node '%s'" % nodeinfo)
2907 ed1ebc60 Guido Trotter
      if req_size > info['vg_free']:
2908 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s."
2909 3ecf6786 Iustin Pop
                                   " %d MB available, %d MB required" %
2910 3ecf6786 Iustin Pop
                                   (node, info['vg_free'], req_size))
2911 ed1ebc60 Guido Trotter
2912 a8083063 Iustin Pop
    # os verification
2913 a8083063 Iustin Pop
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2914 a8083063 Iustin Pop
    if not isinstance(os_obj, objects.OS):
2915 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
2916 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
2917 a8083063 Iustin Pop
2918 a8083063 Iustin Pop
    # instance verification
2919 89e1fc26 Iustin Pop
    hostname1 = utils.HostInfo(self.op.instance_name)
2920 a8083063 Iustin Pop
2921 bcf043c9 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
2922 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2923 a8083063 Iustin Pop
    if instance_name in instance_list:
2924 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2925 3ecf6786 Iustin Pop
                                 instance_name)
2926 a8083063 Iustin Pop
2927 a8083063 Iustin Pop
    ip = getattr(self.op, "ip", None)
2928 a8083063 Iustin Pop
    if ip is None or ip.lower() == "none":
2929 a8083063 Iustin Pop
      inst_ip = None
2930 a8083063 Iustin Pop
    elif ip.lower() == "auto":
2931 bcf043c9 Iustin Pop
      inst_ip = hostname1.ip
2932 a8083063 Iustin Pop
    else:
2933 a8083063 Iustin Pop
      if not utils.IsValidIP(ip):
2934 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
2935 3ecf6786 Iustin Pop
                                   " like a valid IP" % ip)
2936 a8083063 Iustin Pop
      inst_ip = ip
2937 a8083063 Iustin Pop
    self.inst_ip = inst_ip
2938 a8083063 Iustin Pop
2939 bdd55f71 Iustin Pop
    if self.op.start and not self.op.ip_check:
2940 bdd55f71 Iustin Pop
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
2941 bdd55f71 Iustin Pop
                                 " adding an instance in start mode")
2942 bdd55f71 Iustin Pop
2943 bdd55f71 Iustin Pop
    if self.op.ip_check:
2944 16abfbc2 Alexander Schreiber
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
2945 16abfbc2 Alexander Schreiber
                       constants.DEFAULT_NODED_PORT):
2946 16abfbc2 Alexander Schreiber
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2947 16abfbc2 Alexander Schreiber
                                   (hostname1.ip, instance_name))
2948 a8083063 Iustin Pop
2949 a8083063 Iustin Pop
    # bridge verification
2950 a8083063 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
2951 a8083063 Iustin Pop
    if bridge is None:
2952 a8083063 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
2953 a8083063 Iustin Pop
    else:
2954 a8083063 Iustin Pop
      self.op.bridge = bridge
2955 a8083063 Iustin Pop
2956 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
2957 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
2958 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
2959 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
2960 a8083063 Iustin Pop
2961 a8083063 Iustin Pop
    if self.op.start:
2962 a8083063 Iustin Pop
      self.instance_status = 'up'
2963 a8083063 Iustin Pop
    else:
2964 a8083063 Iustin Pop
      self.instance_status = 'down'
2965 a8083063 Iustin Pop
2966 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2967 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
2968 a8083063 Iustin Pop

2969 a8083063 Iustin Pop
    """
2970 a8083063 Iustin Pop
    instance = self.op.instance_name
2971 a8083063 Iustin Pop
    pnode_name = self.pnode.name
2972 a8083063 Iustin Pop
2973 a8083063 Iustin Pop
    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
2974 a8083063 Iustin Pop
    if self.inst_ip is not None:
2975 a8083063 Iustin Pop
      nic.ip = self.inst_ip
2976 a8083063 Iustin Pop
2977 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
2978 a8083063 Iustin Pop
                                  self.op.disk_template,
2979 a8083063 Iustin Pop
                                  instance, pnode_name,
2980 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
2981 a8083063 Iustin Pop
                                  self.op.swap_size)
2982 a8083063 Iustin Pop
2983 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
2984 a8083063 Iustin Pop
                            primary_node=pnode_name,
2985 a8083063 Iustin Pop
                            memory=self.op.mem_size,
2986 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
2987 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
2988 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
2989 a8083063 Iustin Pop
                            status=self.instance_status,
2990 a8083063 Iustin Pop
                            )
2991 a8083063 Iustin Pop
2992 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
2993 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
2994 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2995 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
2996 a8083063 Iustin Pop
2997 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
2998 a8083063 Iustin Pop
2999 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
3000 a8083063 Iustin Pop
3001 a8083063 Iustin Pop
    if self.op.wait_for_sync:
3002 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj)
3003 2a710df1 Michael Hanselmann
    elif iobj.disk_template == constants.DT_REMOTE_RAID1:
3004 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
3005 a8083063 Iustin Pop
      time.sleep(15)
3006 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
3007 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
3008 a8083063 Iustin Pop
    else:
3009 a8083063 Iustin Pop
      disk_abort = False
3010 a8083063 Iustin Pop
3011 a8083063 Iustin Pop
    if disk_abort:
3012 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
3013 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
3014 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
3015 3ecf6786 Iustin Pop
                               " this instance")
3016 a8083063 Iustin Pop
3017 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
3018 a8083063 Iustin Pop
                (instance, pnode_name))
3019 a8083063 Iustin Pop
3020 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
3021 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
3022 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
3023 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
3024 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
3025 3ecf6786 Iustin Pop
                                   " on node %s" %
3026 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3027 a8083063 Iustin Pop
3028 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
3029 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
3030 a8083063 Iustin Pop
        src_node = self.op.src_node
3031 a8083063 Iustin Pop
        src_image = self.src_image
3032 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
3033 a8083063 Iustin Pop
                                                src_node, src_image):
3034 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
3035 3ecf6786 Iustin Pop
                                   " %s on node %s" %
3036 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
3037 a8083063 Iustin Pop
      else:
3038 a8083063 Iustin Pop
        # also checked in the prereq part
3039 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
3040 3ecf6786 Iustin Pop
                                     % self.op.mode)
3041 a8083063 Iustin Pop
3042 a8083063 Iustin Pop
    if self.op.start:
3043 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
3044 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
3045 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
3046 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
3047 a8083063 Iustin Pop
3048 a8083063 Iustin Pop
3049 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
3050 a8083063 Iustin Pop
  """Connect to an instance's console.
3051 a8083063 Iustin Pop

3052 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
3053 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
3054 a8083063 Iustin Pop
  console.
3055 a8083063 Iustin Pop

3056 a8083063 Iustin Pop
  """
3057 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3058 a8083063 Iustin Pop
3059 a8083063 Iustin Pop
  def CheckPrereq(self):
3060 a8083063 Iustin Pop
    """Check prerequisites.
3061 a8083063 Iustin Pop

3062 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3063 a8083063 Iustin Pop

3064 a8083063 Iustin Pop
    """
3065 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3066 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3067 a8083063 Iustin Pop
    if instance is None:
3068 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3069 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3070 a8083063 Iustin Pop
    self.instance = instance
3071 a8083063 Iustin Pop
3072 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3073 a8083063 Iustin Pop
    """Connect to the console of an instance
3074 a8083063 Iustin Pop

3075 a8083063 Iustin Pop
    """
3076 a8083063 Iustin Pop
    instance = self.instance
3077 a8083063 Iustin Pop
    node = instance.primary_node
3078 a8083063 Iustin Pop
3079 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3080 a8083063 Iustin Pop
    if node_insts is False:
3081 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3082 a8083063 Iustin Pop
3083 a8083063 Iustin Pop
    if instance.name not in node_insts:
3084 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3085 a8083063 Iustin Pop
3086 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3087 a8083063 Iustin Pop
3088 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3089 a8083063 Iustin Pop
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
3090 82122173 Iustin Pop
    # build ssh cmdline
3091 82122173 Iustin Pop
    argv = ["ssh", "-q", "-t"]
3092 82122173 Iustin Pop
    argv.extend(ssh.KNOWN_HOSTS_OPTS)
3093 82122173 Iustin Pop
    argv.extend(ssh.BATCH_MODE_OPTS)
3094 82122173 Iustin Pop
    argv.append(node)
3095 82122173 Iustin Pop
    argv.append(console_cmd)
3096 82122173 Iustin Pop
    return "ssh", argv
3097 a8083063 Iustin Pop
3098 a8083063 Iustin Pop
3099 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
3100 a8083063 Iustin Pop
  """Adda new mirror member to an instance's disk.
3101 a8083063 Iustin Pop

3102 a8083063 Iustin Pop
  """
3103 a8083063 Iustin Pop
  HPATH = "mirror-add"
3104 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3105 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
3106 a8083063 Iustin Pop
3107 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3108 a8083063 Iustin Pop
    """Build hooks env.
3109 a8083063 Iustin Pop

3110 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3111 a8083063 Iustin Pop

3112 a8083063 Iustin Pop
    """
3113 a8083063 Iustin Pop
    env = {
3114 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3115 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
3116 a8083063 Iustin Pop
      }
3117 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3118 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3119 a8083063 Iustin Pop
          self.op.remote_node,] + list(self.instance.secondary_nodes)
3120 a8083063 Iustin Pop
    return env, nl, nl
3121 a8083063 Iustin Pop
3122 a8083063 Iustin Pop
  def CheckPrereq(self):
3123 a8083063 Iustin Pop
    """Check prerequisites.
3124 a8083063 Iustin Pop

3125 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3126 a8083063 Iustin Pop

3127 a8083063 Iustin Pop
    """
3128 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3129 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3130 a8083063 Iustin Pop
    if instance is None:
3131 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3132 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3133 a8083063 Iustin Pop
    self.instance = instance
3134 a8083063 Iustin Pop
3135 a8083063 Iustin Pop
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3136 a8083063 Iustin Pop
    if remote_node is None:
3137 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
3138 a8083063 Iustin Pop
    self.remote_node = remote_node
3139 a8083063 Iustin Pop
3140 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3141 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3142 3ecf6786 Iustin Pop
                                 " the instance.")
3143 a8083063 Iustin Pop
3144 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3145 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3146 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3147 a8083063 Iustin Pop
    for disk in instance.disks:
3148 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
3149 a8083063 Iustin Pop
        break
3150 a8083063 Iustin Pop
    else:
3151 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3152 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
3153 a8083063 Iustin Pop
    if len(disk.children) > 1:
3154 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The device already has two slave"
3155 3ecf6786 Iustin Pop
                                 " devices.\n"
3156 3ecf6786 Iustin Pop
                                 "This would create a 3-disk raid1"
3157 3ecf6786 Iustin Pop
                                 " which we don't allow.")
3158 a8083063 Iustin Pop
    self.disk = disk
3159 a8083063 Iustin Pop
3160 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3161 a8083063 Iustin Pop
    """Add the mirror component
3162 a8083063 Iustin Pop

3163 a8083063 Iustin Pop
    """
3164 a8083063 Iustin Pop
    disk = self.disk
3165 a8083063 Iustin Pop
    instance = self.instance
3166 a8083063 Iustin Pop
3167 a8083063 Iustin Pop
    remote_node = self.remote_node
3168 923b1523 Iustin Pop
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
3169 923b1523 Iustin Pop
    names = _GenerateUniqueNames(self.cfg, lv_names)
3170 923b1523 Iustin Pop
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
3171 923b1523 Iustin Pop
                                     remote_node, disk.size, names)
3172 a8083063 Iustin Pop
3173 a8083063 Iustin Pop
    logger.Info("adding new mirror component on secondary")
3174 a8083063 Iustin Pop
    #HARDCODE
3175 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
3176 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3177 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create new component on secondary"
3178 3ecf6786 Iustin Pop
                               " node %s" % remote_node)
3179 a8083063 Iustin Pop
3180 a8083063 Iustin Pop
    logger.Info("adding new mirror component on primary")
3181 a8083063 Iustin Pop
    #HARDCODE
3182 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
3183 a0c3fea1 Michael Hanselmann
                                    _GetInstanceInfoText(instance)):
3184 a8083063 Iustin Pop
      # remove secondary dev
3185 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
3186 a8083063 Iustin Pop
      rpc.call_blockdev_remove(remote_node, new_drbd)
3187 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create volume on primary")
3188 a8083063 Iustin Pop
3189 a8083063 Iustin Pop
    # the device exists now
3190 a8083063 Iustin Pop
    # call the primary node to add the mirror to md
3191 a8083063 Iustin Pop
    logger.Info("adding new mirror component to md")
3192 a8083063 Iustin Pop
    if not rpc.call_blockdev_addchild(instance.primary_node,
3193 a8083063 Iustin Pop
                                           disk, new_drbd):
3194 a8083063 Iustin Pop
      logger.Error("Can't add mirror compoment to md!")
3195 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
3196 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
3197 a8083063 Iustin Pop
        logger.Error("Can't rollback on secondary")
3198 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
3199 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3200 a8083063 Iustin Pop
        logger.Error("Can't rollback on primary")
3201 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't add mirror component to md array")
3202 a8083063 Iustin Pop
3203 a8083063 Iustin Pop
    disk.children.append(new_drbd)
3204 a8083063 Iustin Pop
3205 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3206 a8083063 Iustin Pop
3207 a8083063 Iustin Pop
    _WaitForSync(self.cfg, instance)
3208 a8083063 Iustin Pop
3209 a8083063 Iustin Pop
    return 0
3210 a8083063 Iustin Pop
3211 a8083063 Iustin Pop
3212 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
3213 a8083063 Iustin Pop
  """Remove a component from a remote_raid1 disk.
3214 a8083063 Iustin Pop

3215 a8083063 Iustin Pop
  """
3216 a8083063 Iustin Pop
  HPATH = "mirror-remove"
3217 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3218 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]
3219 a8083063 Iustin Pop
3220 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3221 a8083063 Iustin Pop
    """Build hooks env.
3222 a8083063 Iustin Pop

3223 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3224 a8083063 Iustin Pop

3225 a8083063 Iustin Pop
    """
3226 a8083063 Iustin Pop
    env = {
3227 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
3228 a8083063 Iustin Pop
      "DISK_ID": self.op.disk_id,
3229 a8083063 Iustin Pop
      "OLD_SECONDARY": self.old_secondary,
3230 a8083063 Iustin Pop
      }
3231 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3232 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3233 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3234 a8083063 Iustin Pop
    return env, nl, nl
3235 a8083063 Iustin Pop
3236 a8083063 Iustin Pop
  def CheckPrereq(self):
3237 a8083063 Iustin Pop
    """Check prerequisites.
3238 a8083063 Iustin Pop

3239 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3240 a8083063 Iustin Pop

3241 a8083063 Iustin Pop
    """
3242 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3243 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3244 a8083063 Iustin Pop
    if instance is None:
3245 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3246 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3247 a8083063 Iustin Pop
    self.instance = instance
3248 a8083063 Iustin Pop
3249 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3250 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3251 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3252 a8083063 Iustin Pop
    for disk in instance.disks:
3253 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
3254 a8083063 Iustin Pop
        break
3255 a8083063 Iustin Pop
    else:
3256 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3257 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
3258 a8083063 Iustin Pop
    for child in disk.children:
3259 a8083063 Iustin Pop
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
3260 a8083063 Iustin Pop
        break
3261 a8083063 Iustin Pop
    else:
3262 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find the device with this port.")
3263 a8083063 Iustin Pop
3264 a8083063 Iustin Pop
    if len(disk.children) < 2:
3265 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot remove the last component from"
3266 3ecf6786 Iustin Pop
                                 " a mirror.")
3267 a8083063 Iustin Pop
    self.disk = disk
3268 a8083063 Iustin Pop
    self.child = child
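    # the first two elements of child.logical_id are the two nodes of the
    # drbd pair; whichever of them is not the primary is the old secondary
    # that this operation will drop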
3269 a8083063 Iustin Pop
    if self.child.logical_id[0] == instance.primary_node:
3270 a8083063 Iustin Pop
      oid = 1
3271 a8083063 Iustin Pop
    else:
3272 a8083063 Iustin Pop
      oid = 0
3273 a8083063 Iustin Pop
    self.old_secondary = self.child.logical_id[oid]
3274 a8083063 Iustin Pop
3275 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3276 a8083063 Iustin Pop
    """Remove the mirror component
3277 a8083063 Iustin Pop

3278 a8083063 Iustin Pop
    """
3279 a8083063 Iustin Pop
    instance = self.instance
3280 a8083063 Iustin Pop
    disk = self.disk
3281 a8083063 Iustin Pop
    child = self.child
3282 a8083063 Iustin Pop
    logger.Info("remove mirror component")
3283 a8083063 Iustin Pop
    self.cfg.SetDiskID(disk, instance.primary_node)
3284 a8083063 Iustin Pop
    if not rpc.call_blockdev_removechild(instance.primary_node,
3285 a8083063 Iustin Pop
                                              disk, child):
3286 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't remove child from mirror.")
3287 a8083063 Iustin Pop
3288 a8083063 Iustin Pop
    for node in child.logical_id[:2]:
3289 a8083063 Iustin Pop
      self.cfg.SetDiskID(child, node)
3290 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, child):
3291 a8083063 Iustin Pop
        logger.Error("Warning: failed to remove device from node %s,"
3292 a8083063 Iustin Pop
                     " continuing operation." % node)
3293 a8083063 Iustin Pop
3294 a8083063 Iustin Pop
    disk.children.remove(child)
3295 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3296 a8083063 Iustin Pop
3297 a8083063 Iustin Pop
3298 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3299 a8083063 Iustin Pop
  """Replace the disks of an instance.
3300 a8083063 Iustin Pop

3301 a8083063 Iustin Pop
  """
3302 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3303 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3304 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3305 a8083063 Iustin Pop
3306 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3307 a8083063 Iustin Pop
    """Build hooks env.
3308 a8083063 Iustin Pop

3309 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3310 a8083063 Iustin Pop

3311 a8083063 Iustin Pop
    """
3312 a8083063 Iustin Pop
    env = {
3313 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3314 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3315 a8083063 Iustin Pop
      }
3316 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3317 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3318 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3319 a8083063 Iustin Pop
    return env, nl, nl
3320 a8083063 Iustin Pop
3321 a8083063 Iustin Pop
  def CheckPrereq(self):
3322 a8083063 Iustin Pop
    """Check prerequisites.
3323 a8083063 Iustin Pop

3324 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3325 a8083063 Iustin Pop

3326 a8083063 Iustin Pop
    """
3327 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3328 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3329 a8083063 Iustin Pop
    if instance is None:
3330 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3331 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3332 a8083063 Iustin Pop
    self.instance = instance
3333 a8083063 Iustin Pop
3334 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3335 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3336 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3337 a8083063 Iustin Pop
3338 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3339 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3340 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3341 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3342 a8083063 Iustin Pop
3343 a8083063 Iustin Pop
    remote_node = getattr(self.op, "remote_node", None)
3344 a8083063 Iustin Pop
    if remote_node is None:
3345 a8083063 Iustin Pop
      remote_node = instance.secondary_nodes[0]
3346 a8083063 Iustin Pop
    else:
3347 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3348 a8083063 Iustin Pop
      if remote_node is None:
3349 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3350 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3351 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3352 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3353 3ecf6786 Iustin Pop
                                 " the instance.")
3354 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3355 a8083063 Iustin Pop
3356 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3357 a8083063 Iustin Pop
    """Replace the disks of an instance.
3358 a8083063 Iustin Pop

3359 a8083063 Iustin Pop
    """
3360 a8083063 Iustin Pop
    instance = self.instance
3361 a8083063 Iustin Pop
    iv_names = {}
3362 a8083063 Iustin Pop
    # start of work
3363 a8083063 Iustin Pop
    remote_node = self.op.remote_node
3364 a8083063 Iustin Pop
    cfg = self.cfg
3365 a8083063 Iustin Pop
    for dev in instance.disks:
3366 a8083063 Iustin Pop
      size = dev.size
3367 923b1523 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3368 923b1523 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3369 923b1523 Iustin Pop
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
3370 923b1523 Iustin Pop
                                       remote_node, size, names)
3371 a8083063 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
3372 a8083063 Iustin Pop
      logger.Info("adding new mirror component on secondary for %s" %
3373 a8083063 Iustin Pop
                  dev.iv_name)
3374 a8083063 Iustin Pop
      #HARDCODE
3375 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
3376 a0c3fea1 Michael Hanselmann
                                        _GetInstanceInfoText(instance)):
3377 3ecf6786 Iustin Pop
        raise errors.OpExecError("Failed to create new component on"
3378 3ecf6786 Iustin Pop
                                 " secondary node %s\n"
3379 3ecf6786 Iustin Pop
                                 "Full abort, cleanup manually!" %
3380 3ecf6786 Iustin Pop
                                 remote_node)
3381 a8083063 Iustin Pop
3382 a8083063 Iustin Pop
      logger.Info("adding new mirror component on primary")
3383 a8083063 Iustin Pop
      #HARDCODE
3384 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
3385 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3386 a8083063 Iustin Pop
        # remove secondary dev
3387 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3388 a8083063 Iustin Pop
        rpc.call_blockdev_remove(remote_node, new_drbd)
3389 a8083063 Iustin Pop
        raise errors.OpExecError("Failed to create volume on primary!\n"
3390 a8083063 Iustin Pop
                                 "Full abort, cleanup manually!!")
3391 a8083063 Iustin Pop
3392 a8083063 Iustin Pop
      # the device exists now
3393 a8083063 Iustin Pop
      # call the primary node to add the mirror to md
3394 a8083063 Iustin Pop
      logger.Info("adding new mirror component to md")
3395 a8083063 Iustin Pop
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
3396 880478f8 Iustin Pop
                                        new_drbd):
3397 a8083063 Iustin Pop
        logger.Error("Can't add mirror compoment to md!")
3398 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3399 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
3400 a8083063 Iustin Pop
          logger.Error("Can't rollback on secondary")
3401 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, instance.primary_node)
3402 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3403 a8083063 Iustin Pop
          logger.Error("Can't rollback on primary")
3404 3ecf6786 Iustin Pop
        raise errors.OpExecError("Full abort, cleanup manually!!")
3405 a8083063 Iustin Pop
3406 a8083063 Iustin Pop
      dev.children.append(new_drbd)
3407 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3408 a8083063 Iustin Pop
3409 a8083063 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3410 a8083063 Iustin Pop
    # does a combined result over all disks, so we don't check its
3411 a8083063 Iustin Pop
    # return value
3412 a8083063 Iustin Pop
    _WaitForSync(cfg, instance, unlock=True)
3413 a8083063 Iustin Pop
3414 a8083063 Iustin Pop
    # so check manually all the devices
3415 a8083063 Iustin Pop
    for name in iv_names:
3416 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3417 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
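      # element 5 of the blockdev_find result is treated as the
      # 'is degraded' flag for both the md device and the new drbd below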
3418 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3419 a8083063 Iustin Pop
      if is_degr:
3420 3ecf6786 Iustin Pop
        raise errors.OpExecError("MD device %s is degraded!" % name)
3421 a8083063 Iustin Pop
      cfg.SetDiskID(new_drbd, instance.primary_node)
3422 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
3423 a8083063 Iustin Pop
      if is_degr:
3424 3ecf6786 Iustin Pop
        raise errors.OpExecError("New drbd device %s is degraded!" % name)
3425 a8083063 Iustin Pop
3426 a8083063 Iustin Pop
    for name in iv_names:
3427 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3428 a8083063 Iustin Pop
      logger.Info("remove mirror %s component" % name)
3429 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3430 a8083063 Iustin Pop
      if not rpc.call_blockdev_removechild(instance.primary_node,
3431 a8083063 Iustin Pop
                                                dev, child):
3432 a8083063 Iustin Pop
        logger.Error("Can't remove child from mirror, aborting"
3433 a8083063 Iustin Pop
                     " *this device cleanup*.\nYou need to cleanup manually!!")
3434 a8083063 Iustin Pop
        continue
3435 a8083063 Iustin Pop
3436 a8083063 Iustin Pop
      for node in child.logical_id[:2]:
3437 a8083063 Iustin Pop
        logger.Info("remove child device on %s" % node)
3438 a8083063 Iustin Pop
        cfg.SetDiskID(child, node)
3439 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(node, child):
3440 a8083063 Iustin Pop
          logger.Error("Warning: failed to remove device from node %s,"
3441 a8083063 Iustin Pop
                       " continuing operation." % node)
3442 a8083063 Iustin Pop
3443 a8083063 Iustin Pop
      dev.children.remove(child)
3444 a8083063 Iustin Pop
3445 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3446 a8083063 Iustin Pop
3447 a8083063 Iustin Pop
3448 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3449 a8083063 Iustin Pop
  """Query runtime instance data.
3450 a8083063 Iustin Pop

3451 a8083063 Iustin Pop
  """
3452 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3453 a8083063 Iustin Pop
3454 a8083063 Iustin Pop
  def CheckPrereq(self):
3455 a8083063 Iustin Pop
    """Check prerequisites.
3456 a8083063 Iustin Pop

3457 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3458 a8083063 Iustin Pop

3459 a8083063 Iustin Pop
    """
3460 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3461 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3462 a8083063 Iustin Pop
    if self.op.instances:
3463 a8083063 Iustin Pop
      self.wanted_instances = []
3464 a8083063 Iustin Pop
      names = self.op.instances
3465 a8083063 Iustin Pop
      for name in names:
3466 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3467 a8083063 Iustin Pop
        if instance is None:
3468 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3469 a8083063 Iustin Pop
        self.wanted_instances.append(instance)
3470 a8083063 Iustin Pop
    else:
3471 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3472 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3473 a8083063 Iustin Pop
    return
3474 a8083063 Iustin Pop
3475 a8083063 Iustin Pop
3476 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3477 a8083063 Iustin Pop
    """Compute block device status.
3478 a8083063 Iustin Pop

3479 a8083063 Iustin Pop
    """
3480 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
3481 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3482 a8083063 Iustin Pop
    if dev.dev_type == "drbd":
3483 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
3484 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
3485 a8083063 Iustin Pop
        snode = dev.logical_id[1]
3486 a8083063 Iustin Pop
      else:
3487 a8083063 Iustin Pop
        snode = dev.logical_id[0]
3488 a8083063 Iustin Pop
3489 a8083063 Iustin Pop
    if snode:
3490 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
3491 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3492 a8083063 Iustin Pop
    else:
3493 a8083063 Iustin Pop
      dev_sstatus = None
3494 a8083063 Iustin Pop
3495 a8083063 Iustin Pop
    if dev.children:
3496 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3497 a8083063 Iustin Pop
                      for child in dev.children]
3498 a8083063 Iustin Pop
    else:
3499 a8083063 Iustin Pop
      dev_children = []
3500 a8083063 Iustin Pop
3501 a8083063 Iustin Pop
    data = {
3502 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
3503 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
3504 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
3505 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
3506 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
3507 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
3508 a8083063 Iustin Pop
      "children": dev_children,
3509 a8083063 Iustin Pop
      }
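    # Illustrative shape of the returned structure (values invented for the
    # example; pstatus/sstatus hold whatever rpc.call_blockdev_find returned):
    #   {"iv_name": "sda", "dev_type": "md_raid1", "logical_id": ...,
    #    "physical_id": ..., "pstatus": ..., "sstatus": ...,
    #    "children": [{...}, {...}]}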
3510 a8083063 Iustin Pop
3511 a8083063 Iustin Pop
    return data
3512 a8083063 Iustin Pop
3513 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3514 a8083063 Iustin Pop
    """Gather and return data"""
3515 a8083063 Iustin Pop
    result = {}
3516 a8083063 Iustin Pop
    for instance in self.wanted_instances:
3517 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
3518 a8083063 Iustin Pop
                                                instance.name)
3519 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
3520 a8083063 Iustin Pop
        remote_state = "up"
3521 a8083063 Iustin Pop
      else:
3522 a8083063 Iustin Pop
        remote_state = "down"
3523 a8083063 Iustin Pop
      if instance.status == "down":
3524 a8083063 Iustin Pop
        config_state = "down"
3525 a8083063 Iustin Pop
      else:
3526 a8083063 Iustin Pop
        config_state = "up"
3527 a8083063 Iustin Pop
3528 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
3529 a8083063 Iustin Pop
               for device in instance.disks]
3530 a8083063 Iustin Pop
3531 a8083063 Iustin Pop
      idict = {
3532 a8083063 Iustin Pop
        "name": instance.name,
3533 a8083063 Iustin Pop
        "config_state": config_state,
3534 a8083063 Iustin Pop
        "run_state": remote_state,
3535 a8083063 Iustin Pop
        "pnode": instance.primary_node,
3536 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
3537 a8083063 Iustin Pop
        "os": instance.os,
3538 a8083063 Iustin Pop
        "memory": instance.memory,
3539 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
3540 a8083063 Iustin Pop
        "disks": disks,
3541 f55ff7ec Iustin Pop
        "vcpus": instance.vcpus,
3542 a8083063 Iustin Pop
        }
3543 a8083063 Iustin Pop
3544 a8083063 Iustin Pop
      result[instance.name] = idict
3545 a8083063 Iustin Pop
3546 a8083063 Iustin Pop
    return result
3547 a8083063 Iustin Pop
3548 a8083063 Iustin Pop
3549 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
3550 a8083063 Iustin Pop
  """Modifies an instances's parameters.
3551 a8083063 Iustin Pop

3552 a8083063 Iustin Pop
  """
3553 a8083063 Iustin Pop
  HPATH = "instance-modify"
3554 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3555 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3556 a8083063 Iustin Pop
3557 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3558 a8083063 Iustin Pop
    """Build hooks env.
3559 a8083063 Iustin Pop

3560 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
3561 a8083063 Iustin Pop

3562 a8083063 Iustin Pop
    """
3563 396e1b78 Michael Hanselmann
    args = dict()
3564 a8083063 Iustin Pop
    if self.mem:
3565 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
3566 a8083063 Iustin Pop
    if self.vcpus:
3567 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
3568 396e1b78 Michael Hanselmann
    if self.do_ip or self.do_bridge:
3569 396e1b78 Michael Hanselmann
      if self.do_ip:
3570 396e1b78 Michael Hanselmann
        ip = self.ip
3571 396e1b78 Michael Hanselmann
      else:
3572 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
3573 396e1b78 Michael Hanselmann
      if self.bridge:
3574 396e1b78 Michael Hanselmann
        bridge = self.bridge
3575 396e1b78 Michael Hanselmann
      else:
3576 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
3577 396e1b78 Michael Hanselmann
      args['nics'] = [(ip, bridge)]
3578 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
3579 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3580 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3581 a8083063 Iustin Pop
    return env, nl, nl
3582 a8083063 Iustin Pop
3583 a8083063 Iustin Pop
  def CheckPrereq(self):
3584 a8083063 Iustin Pop
    """Check prerequisites.
3585 a8083063 Iustin Pop

3586 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
3587 a8083063 Iustin Pop

3588 a8083063 Iustin Pop
    """
3589 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
3590 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
3591 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
3592 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
3593 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3594 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
3595 a8083063 Iustin Pop
    if self.mem is not None:
3596 a8083063 Iustin Pop
      try:
3597 a8083063 Iustin Pop
        self.mem = int(self.mem)
3598 a8083063 Iustin Pop
      except ValueError, err:
3599 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
3600 a8083063 Iustin Pop
    if self.vcpus is not None:
3601 a8083063 Iustin Pop
      try:
3602 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
3603 a8083063 Iustin Pop
      except ValueError, err:
3604 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
3605 a8083063 Iustin Pop
    if self.ip is not None:
3606 a8083063 Iustin Pop
      self.do_ip = True
3607 a8083063 Iustin Pop
      if self.ip.lower() == "none":
3608 a8083063 Iustin Pop
        self.ip = None
3609 a8083063 Iustin Pop
      else:
3610 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
3611 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
3612 a8083063 Iustin Pop
    else:
3613 a8083063 Iustin Pop
      self.do_ip = False
3614 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
3615 a8083063 Iustin Pop
3616 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3617 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3618 a8083063 Iustin Pop
    if instance is None:
3619 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
3620 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3621 a8083063 Iustin Pop
    self.op.instance_name = instance.name
3622 a8083063 Iustin Pop
    self.instance = instance
3623 a8083063 Iustin Pop
    return
3624 a8083063 Iustin Pop
3625 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3626 a8083063 Iustin Pop
    """Modifies an instance.
3627 a8083063 Iustin Pop

3628 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
3629 a8083063 Iustin Pop
    """
3630 a8083063 Iustin Pop
    result = []
3631 a8083063 Iustin Pop
    instance = self.instance
3632 a8083063 Iustin Pop
    if self.mem:
3633 a8083063 Iustin Pop
      instance.memory = self.mem
3634 a8083063 Iustin Pop
      result.append(("mem", self.mem))
3635 a8083063 Iustin Pop
    if self.vcpus:
3636 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
3637 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
3638 a8083063 Iustin Pop
    if self.do_ip:
3639 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
3640 a8083063 Iustin Pop
      result.append(("ip", self.ip))
3641 a8083063 Iustin Pop
    if self.bridge:
3642 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
3643 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
3644 a8083063 Iustin Pop
3645 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
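    # the accumulated result is a list of (parameter, new value) pairs,
    # e.g. [("mem", 512), ("bridge", "xen-br1")] (values illustrative)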
3646 a8083063 Iustin Pop
3647 a8083063 Iustin Pop
    return result
3648 a8083063 Iustin Pop
3649 a8083063 Iustin Pop
3650 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
3651 a8083063 Iustin Pop
  """Query the exports list
3652 a8083063 Iustin Pop

3653 a8083063 Iustin Pop
  """
3654 a8083063 Iustin Pop
  _OP_REQP = []
3655 a8083063 Iustin Pop
3656 a8083063 Iustin Pop
  def CheckPrereq(self):
3657 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
3658 a8083063 Iustin Pop

3659 a8083063 Iustin Pop
    """
3660 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
3661 a8083063 Iustin Pop
3662 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3663 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
3664 a8083063 Iustin Pop

3665 a8083063 Iustin Pop
    Returns:
3666 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
3667 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
3668 a8083063 Iustin Pop
      that node.
3669 a8083063 Iustin Pop

3670 a8083063 Iustin Pop
    """
3671 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
3672 a8083063 Iustin Pop
3673 a8083063 Iustin Pop
3674 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
3675 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
3676 a8083063 Iustin Pop

3677 a8083063 Iustin Pop
  """
3678 a8083063 Iustin Pop
  HPATH = "instance-export"
3679 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3680 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3681 a8083063 Iustin Pop
3682 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3683 a8083063 Iustin Pop
    """Build hooks env.
3684 a8083063 Iustin Pop

3685 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
3686 a8083063 Iustin Pop

3687 a8083063 Iustin Pop
    """
3688 a8083063 Iustin Pop
    env = {
3689 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
3690 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
3691 a8083063 Iustin Pop
      }
3692 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3693 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3694 a8083063 Iustin Pop
          self.op.target_node]
3695 a8083063 Iustin Pop
    return env, nl, nl
3696 a8083063 Iustin Pop
3697 a8083063 Iustin Pop
  def CheckPrereq(self):
3698 a8083063 Iustin Pop
    """Check prerequisites.
3699 a8083063 Iustin Pop

3700 a8083063 Iustin Pop
    This checks that the instance name is a valid one.
3701 a8083063 Iustin Pop

3702 a8083063 Iustin Pop
    """
3703 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
3704 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
3705 a8083063 Iustin Pop
    if self.instance is None:
3706 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
3707 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3708 a8083063 Iustin Pop
3709 a8083063 Iustin Pop
    # node verification
3710 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
3711 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
3712 a8083063 Iustin Pop
3713 a8083063 Iustin Pop
    if self.dst_node is None:
3714 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
3715 3ecf6786 Iustin Pop
                                 self.op.target_node)
3716 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
3717 a8083063 Iustin Pop
3718 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3719 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
3720 a8083063 Iustin Pop

3721 a8083063 Iustin Pop
    """
3722 a8083063 Iustin Pop
    instance = self.instance
3723 a8083063 Iustin Pop
    dst_node = self.dst_node
3724 a8083063 Iustin Pop
    src_node = instance.primary_node
3725 a8083063 Iustin Pop
    # shutdown the instance, unless requested not to do so
3726 a8083063 Iustin Pop
    if self.op.shutdown:
3727 a8083063 Iustin Pop
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
3728 a8083063 Iustin Pop
      self.processor.ChainOpCode(op, feedback_fn)
3729 a8083063 Iustin Pop
3730 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
3731 a8083063 Iustin Pop
3732 a8083063 Iustin Pop
    snap_disks = []
3733 a8083063 Iustin Pop
3734 a8083063 Iustin Pop
    try:
3735 a8083063 Iustin Pop
      for disk in instance.disks:
3736 a8083063 Iustin Pop
        if disk.iv_name == "sda":
3737 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an LVM leaf of the device we passed
3738 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
3739 a8083063 Iustin Pop
3740 a8083063 Iustin Pop
          if not new_dev_name:
3741 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
3742 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
3743 a8083063 Iustin Pop
          else:
3744 a8083063 Iustin Pop
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
3745 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
3746 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
3747 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
3748 a8083063 Iustin Pop
            snap_disks.append(new_dev)
3749 a8083063 Iustin Pop
3750 a8083063 Iustin Pop
    finally:
3751 a8083063 Iustin Pop
      if self.op.shutdown:
3752 a8083063 Iustin Pop
        op = opcodes.OpStartupInstance(instance_name=instance.name,
3753 a8083063 Iustin Pop
                                       force=False)
3754 a8083063 Iustin Pop
        self.processor.ChainOpCode(op, feedback_fn)
3755 a8083063 Iustin Pop
3756 a8083063 Iustin Pop
    # TODO: check for size
3757 a8083063 Iustin Pop
3758 a8083063 Iustin Pop
    for dev in snap_disks:
3759 a8083063 Iustin Pop
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
3760 a8083063 Iustin Pop
                                           instance):
3761 a8083063 Iustin Pop
        logger.Error("could not export block device %s from node"
3762 a8083063 Iustin Pop
                     " %s to node %s" %
3763 a8083063 Iustin Pop
                     (dev.logical_id[1], src_node, dst_node.name))
3764 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
3765 a8083063 Iustin Pop
        logger.Error("could not remove snapshot block device %s from"
3766 a8083063 Iustin Pop
                     " node %s" % (dev.logical_id[1], src_node))
3767 a8083063 Iustin Pop
3768 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
3769 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
3770 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
3771 a8083063 Iustin Pop
3772 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
3773 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
3774 a8083063 Iustin Pop
3775 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
3776 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
3777 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
3778 a8083063 Iustin Pop
    if nodelist:
3779 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
3780 a8083063 Iustin Pop
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
3781 a8083063 Iustin Pop
      for node in exportlist:
3782 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
3783 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
3784 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
3785 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
3786 5c947f38 Iustin Pop
3787 5c947f38 Iustin Pop
3788 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
3789 5c947f38 Iustin Pop
  """Generic tags LU.
3790 5c947f38 Iustin Pop

3791 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
3792 5c947f38 Iustin Pop

3793 5c947f38 Iustin Pop
  """
3794 5c947f38 Iustin Pop
  def CheckPrereq(self):
3795 5c947f38 Iustin Pop
    """Check prerequisites.
3796 5c947f38 Iustin Pop

3797 5c947f38 Iustin Pop
    """
3798 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
3799 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
3800 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
3801 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
3802 5c947f38 Iustin Pop
      if name is None:
3803 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
3804 3ecf6786 Iustin Pop
                                   (self.op.name,))
3805 5c947f38 Iustin Pop
      self.op.name = name
3806 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
3807 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
3808 8f684e16 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
3809 5c947f38 Iustin Pop
      if name is None:
3810 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
3811 3ecf6786 Iustin Pop
                                   (self.op.name,))
3812 5c947f38 Iustin Pop
      self.op.name = name
3813 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
3814 5c947f38 Iustin Pop
    else:
3815 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
3816 3ecf6786 Iustin Pop
                                 str(self.op.kind))
3817 5c947f38 Iustin Pop
3818 5c947f38 Iustin Pop
3819 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
3820 5c947f38 Iustin Pop
  """Returns the tags of a given object.
3821 5c947f38 Iustin Pop

3822 5c947f38 Iustin Pop
  """
3823 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
3824 5c947f38 Iustin Pop
3825 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3826 5c947f38 Iustin Pop
    """Returns the tag list.
3827 5c947f38 Iustin Pop

3828 5c947f38 Iustin Pop
    """
3829 5c947f38 Iustin Pop
    return self.target.GetTags()
3830 5c947f38 Iustin Pop
3831 5c947f38 Iustin Pop
3832 f27302fa Iustin Pop
class LUAddTags(TagsLU):
3833 5c947f38 Iustin Pop
  """Sets a tag on a given object.
3834 5c947f38 Iustin Pop

3835 5c947f38 Iustin Pop
  """
3836 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
3837 5c947f38 Iustin Pop
3838 5c947f38 Iustin Pop
  def CheckPrereq(self):
3839 5c947f38 Iustin Pop
    """Check prerequisites.
3840 5c947f38 Iustin Pop

3841 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
3842 5c947f38 Iustin Pop

3843 5c947f38 Iustin Pop
    """
3844 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3845 f27302fa Iustin Pop
    for tag in self.op.tags:
3846 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
3847 5c947f38 Iustin Pop
3848 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3849 5c947f38 Iustin Pop
    """Sets the tag.
3850 5c947f38 Iustin Pop

3851 5c947f38 Iustin Pop
    """
3852 5c947f38 Iustin Pop
    try:
3853 f27302fa Iustin Pop
      for tag in self.op.tags:
3854 f27302fa Iustin Pop
        self.target.AddTag(tag)
3855 5c947f38 Iustin Pop
    except errors.TagError, err:
3856 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
3857 5c947f38 Iustin Pop
    try:
3858 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3859 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3860 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3861 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3862 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
3863 5c947f38 Iustin Pop
3864 5c947f38 Iustin Pop
3865 f27302fa Iustin Pop
class LUDelTags(TagsLU):
3866 f27302fa Iustin Pop
  """Delete a list of tags from a given object.
3867 5c947f38 Iustin Pop

3868 5c947f38 Iustin Pop
  """
3869 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
3870 5c947f38 Iustin Pop
3871 5c947f38 Iustin Pop
  def CheckPrereq(self):
3872 5c947f38 Iustin Pop
    """Check prerequisites.
3873 5c947f38 Iustin Pop

3874 5c947f38 Iustin Pop
    This checks that we have the given tag.
3875 5c947f38 Iustin Pop

3876 5c947f38 Iustin Pop
    """
3877 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3878 f27302fa Iustin Pop
    for tag in self.op.tags:
3879 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
3880 f27302fa Iustin Pop
    del_tags = frozenset(self.op.tags)
3881 f27302fa Iustin Pop
    cur_tags = self.target.GetTags()
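    # '<=' between (frozen)sets is the subset test: every tag requested for
    # removal must currently be present on the target object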
3882 f27302fa Iustin Pop
    if not del_tags <= cur_tags:
3883 f27302fa Iustin Pop
      diff_tags = del_tags - cur_tags
3884 f27302fa Iustin Pop
      diff_names = ["'%s'" % tag for tag in diff_tags]
3885 f27302fa Iustin Pop
      diff_names.sort()
3886 f27302fa Iustin Pop
      raise errors.OpPrereqError("Tag(s) %s not found" %
3887 f27302fa Iustin Pop
                                 (",".join(diff_names)))
3888 5c947f38 Iustin Pop
3889 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3890 5c947f38 Iustin Pop
    """Remove the tag from the object.
3891 5c947f38 Iustin Pop

3892 5c947f38 Iustin Pop
    """
3893 f27302fa Iustin Pop
    for tag in self.op.tags:
3894 f27302fa Iustin Pop
      self.target.RemoveTag(tag)
3895 5c947f38 Iustin Pop
    try:
3896 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3897 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3898 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3899 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3900 3ecf6786 Iustin Pop
                                " aborted. Please retry.")