#!/usr/bin/python
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import socket
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf

class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != socket.gethostname():
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    return

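# Illustrative sketch, not part of the original module: a minimal LU
# subclass following the rules in the LogicalUnit docstring above.  The
# opcode and its 'message' field are hypothetical and exist only for this
# example; real LUs are dispatched by the processor based on their opcode.
class _ExampleEchoLU(NoHooksLU):
  """Example no-op LU that just reports its 'message' parameter."""
  _OP_REQP = ["message"]

  def CheckPrereq(self):
    """Nothing to check; a real LU would canonicalise its parameters here."""
    pass

  def Exec(self, feedback_fn):
    """Report the message back through the feedback function."""
    feedback_fn("echo: %s" % self.op.message)
    return self.op.message

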
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.ExpandNodeName(name)
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = lu.cfg.GetNodeList()
  return utils.NiceSort(wanted)


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.ExpandInstanceName(name)
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = lu.cfg.GetInstanceList()
  return utils.NiceSort(wanted)


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the caller

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))

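# Illustrative usage sketch, not part of the original module: how a query
# LU would typically validate its requested output fields.  The field
# names below are examples only, not an authoritative list.
def _ExampleCheckNodeQueryFields(selected):
  """Example: validate node query fields against static/dynamic sets."""
  _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt"],
                     dynamic=["dtotal", "dfree", "mtotal", "mfree"],
                     selected=selected)
  # _ExampleCheckNodeQueryFields(["name", "dfree"]) passes silently, while
  # an unknown field such as "bogus" raises errors.OpPrereqError.

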
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)

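# Illustrative sketch, not part of the original module: the environment
# produced for a hypothetical single-NIC instance; every value below is
# invented for the example.
def _ExampleInstanceHookEnv():
  """Example of the dict returned by _BuildInstanceHookEnv."""
  env = _BuildInstanceHookEnv(name="instance1.example.com",
                              primary_node="node1.example.com",
                              secondary_nodes=["node2.example.com"],
                              os_type="debian-etch", status="up",
                              memory=128, vcpus=1,
                              nics=[("198.51.100.10", "xen-br0")])
  # env now maps e.g. "INSTANCE_NAME" -> "instance1.example.com",
  # "INSTANCE_NIC0_IP" -> "198.51.100.10",
  # "INSTANCE_NIC0_BRIDGE" -> "xen-br0" and "INSTANCE_NIC_COUNT" -> 1.
  return env

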
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()


def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()
    logger.Debug('read %s' % (repr(rawline),))

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    parts = line.split(' ')
    fields = parts[0].split(',')
    key = parts[2]

    haveall = True
    havesome = False
    for spec in [ ip, fullnode ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
    if haveall and key == pubkey:
      inthere = True
      save_lines.append(rawline)
      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
      continue

    if havesome and (not haveall or key != pubkey):
      removed = True
      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()

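# For illustration, not part of the original module: the line formats the
# two helpers above maintain, shown with invented example values.
_EXAMPLE_ETC_HOSTS_LINE = "198.51.100.11\tnode1.example.com node1\n"
_EXAMPLE_KNOWN_HOSTS_LINE = "node1.example.com,198.51.100.11 ssh-rsa AAAAB3Nza...\n"

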
def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None

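# Illustrative usage sketch, not part of the original module: _HasValidVG
# returns None when the volume group is usable and an error message
# otherwise (sizes are in MiB).  The volume group name 'xenvg' is just an
# example.
def _ExampleCheckVG(vglist):
  """Example: require a volume group named 'xenvg' of at least 20 GiB."""
  vgstatus = _HasValidVG(vglist, "xenvg")
  if vgstatus:
    raise errors.OpPrereqError("Error: %s" % vgstatus)

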
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  if os.path.exists('/root/.ssh/id_dsa'):
    utils.CreateBackup('/root/.ssh/id_dsa')
  if os.path.exists('/root/.ssh/id_dsa.pub'):
    utils.CreateBackup('/root/.ssh/id_dsa.pub')

  utils.RemoveFile('/root/.ssh/id_dsa')
  utils.RemoveFile('/root/.ssh/id_dsa.pub')

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", "/root/.ssh/id_dsa",
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open('/root/.ssh/id_dsa.pub', 'r')
  try:
    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {
      "CLUSTER": self.op.cluster_name,
      "MASTER": self.hostname['hostname_full'],
      }
    return env, [], [self.hostname['hostname_full']]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    hostname_local = socket.gethostname()
    self.hostname = hostname = utils.LookupHostname(hostname_local)
    if not hostname:
      raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
                                 hostname_local)

    if hostname["hostname_full"] != hostname_local:
      raise errors.OpPrereqError("My own hostname (%s) does not match the"
                                 " resolver (%s): probably not using FQDN"
                                 " for hostname." %
                                 (hostname_local, hostname["hostname_full"]))

    if hostname["ip"].startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname["ip"],))

    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
    if not clustername:
      raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
                                 % self.op.cluster_name)

    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
    if result.failed:
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname['ip'])

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if secondary_ip and secondary_ip != hostname['ip']:
      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
      if result.failed:
        raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                   "but it does not belong to this host." %
                                   secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
    ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname'])

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname['hostname_full'])

    # set up ssh config and /etc/hosts
    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname['hostname_full'],
                    hostname['ip'],
                    )

    _UpdateKnownHosts(hostname['hostname_full'],
                      hostname['ip'],
                      sshkey,
                      )

    _InitSSHSetup(hostname['hostname'])

    # init of cluster config file
    cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)

class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    utils.CreateBackup('/root/.ssh/id_dsa')
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())

class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return not bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.sstore.GetMasterNode()
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)

def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      perc_done, est_time, is_degraded = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded

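# Illustrative sketch, not part of the original module: how a single
# (perc_done, est_time, is_degraded) tuple from
# rpc.call_blockdev_getmirrorstatus is interpreted above; the wording
# mirrors _WaitForSync and the numbers are invented.
def _ExampleFormatMirrorStatus(mstat):
  """Render one mirror status tuple the way _WaitForSync reports it."""
  perc_done, est_time, is_degraded = mstat
  if perc_done is None:
    # no percentage reported: _WaitForSync treats the device as done
    return "in sync (degraded=%s)" % bool(is_degraded)
  if est_time is not None:
    return "%5.2f%% done, %d estimated seconds remaining" % (perc_done,
                                                             est_time)
  return "%5.2f%% done, no time estimate" % perc_done

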
1010 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
1011 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1012 a8083063 Iustin Pop

1013 a8083063 Iustin Pop
  """
1014 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1015 a8083063 Iustin Pop
1016 a8083063 Iustin Pop
  result = True
1017 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1018 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1019 a8083063 Iustin Pop
    if not rstats:
1020 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
1021 a8083063 Iustin Pop
      result = False
1022 a8083063 Iustin Pop
    else:
1023 a8083063 Iustin Pop
      result = result and (not rstats[5])
1024 a8083063 Iustin Pop
  if dev.children:
1025 a8083063 Iustin Pop
    for child in dev.children:
1026 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1027 a8083063 Iustin Pop
1028 a8083063 Iustin Pop
  return result
1029 a8083063 Iustin Pop
1030 a8083063 Iustin Pop
1031 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1032 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1033 a8083063 Iustin Pop

1034 a8083063 Iustin Pop
  """
1035 a8083063 Iustin Pop
  _OP_REQP = []
1036 a8083063 Iustin Pop
1037 a8083063 Iustin Pop
  def CheckPrereq(self):
1038 a8083063 Iustin Pop
    """Check prerequisites.
1039 a8083063 Iustin Pop

1040 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1041 a8083063 Iustin Pop

1042 a8083063 Iustin Pop
    """
1043 a8083063 Iustin Pop
    return
1044 a8083063 Iustin Pop
1045 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1046 a8083063 Iustin Pop
    """Compute the list of OSes.
1047 a8083063 Iustin Pop

1048 a8083063 Iustin Pop
    """
1049 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1050 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1051 a8083063 Iustin Pop
    if node_data == False:
1052 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1053 a8083063 Iustin Pop
    return node_data
1054 a8083063 Iustin Pop
1055 a8083063 Iustin Pop
1056 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1057 a8083063 Iustin Pop
  """Logical unit for removing a node.
1058 a8083063 Iustin Pop

1059 a8083063 Iustin Pop
  """
1060 a8083063 Iustin Pop
  HPATH = "node-remove"
1061 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1062 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1063 a8083063 Iustin Pop
1064 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1065 a8083063 Iustin Pop
    """Build hooks env.
1066 a8083063 Iustin Pop

1067 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1068 a8083063 Iustin Pop
    node would not allow itself to run.
1069 a8083063 Iustin Pop

1070 a8083063 Iustin Pop
    """
1071 396e1b78 Michael Hanselmann
    env = {
1072 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1073 396e1b78 Michael Hanselmann
      }
1074 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1075 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1076 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1077 a8083063 Iustin Pop
1078 a8083063 Iustin Pop
  def CheckPrereq(self):
1079 a8083063 Iustin Pop
    """Check prerequisites.
1080 a8083063 Iustin Pop

1081 a8083063 Iustin Pop
    This checks:
1082 a8083063 Iustin Pop
     - the node exists in the configuration
1083 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1084 a8083063 Iustin Pop
     - it's not the master
1085 a8083063 Iustin Pop

1086 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1087 a8083063 Iustin Pop

1088 a8083063 Iustin Pop
    """
1089 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1090 a8083063 Iustin Pop
    if node is None:
1091 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1092 a8083063 Iustin Pop
1093 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1094 a8083063 Iustin Pop
1095 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1096 a8083063 Iustin Pop
    if node.name == masternode:
1097 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1098 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1099 a8083063 Iustin Pop
1100 a8083063 Iustin Pop
    for instance_name in instance_list:
1101 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1102 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1103 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1104 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1105 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1106 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1107 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1108 a8083063 Iustin Pop
    self.op.node_name = node.name
1109 a8083063 Iustin Pop
    self.node = node
1110 a8083063 Iustin Pop
1111 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1112 a8083063 Iustin Pop
    """Removes the node from the cluster.
1113 a8083063 Iustin Pop

1114 a8083063 Iustin Pop
    """
1115 a8083063 Iustin Pop
    node = self.node
1116 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1117 a8083063 Iustin Pop
                node.name)
1118 a8083063 Iustin Pop
1119 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1120 a8083063 Iustin Pop
1121 a8083063 Iustin Pop
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1122 a8083063 Iustin Pop
1123 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1124 a8083063 Iustin Pop
1125 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
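
# Illustrative sketch only: the same configuration scan that CheckPrereq of
# LURemoveNode performs, packaged as a helper returning the instances which
# would block removal of a node (hypothetical name, not used elsewhere).
def _ExampleBlockingInstances(cfg, node_name):
  """Return the instances that keep a node from being removed.

  """
  blockers = []
  for instance_name in cfg.GetInstanceList():
    instance = cfg.GetInstanceInfo(instance_name)
    if (node_name == instance.primary_node or
        node_name in instance.secondary_nodes):
      blockers.append(instance_name)
  return blockers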
1126 a8083063 Iustin Pop
1127 a8083063 Iustin Pop
1128 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1129 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1130 a8083063 Iustin Pop

1131 a8083063 Iustin Pop
  """
1132 246e180a Iustin Pop
  _OP_REQP = ["output_fields", "names"]
1133 a8083063 Iustin Pop
1134 a8083063 Iustin Pop
  def CheckPrereq(self):
1135 a8083063 Iustin Pop
    """Check prerequisites.
1136 a8083063 Iustin Pop

1137 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1138 a8083063 Iustin Pop

1139 a8083063 Iustin Pop
    """
1140 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["dtotal", "dfree",
1141 a8083063 Iustin Pop
                                     "mtotal", "mnode", "mfree"])
1142 a8083063 Iustin Pop
1143 ec223efb Iustin Pop
    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
1144 ec223efb Iustin Pop
                               "pinst_list", "sinst_list",
1145 ec223efb Iustin Pop
                               "pip", "sip"],
1146 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1147 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1148 a8083063 Iustin Pop
1149 246e180a Iustin Pop
    self.wanted = _GetWantedNodes(self, self.op.names)
1150 a8083063 Iustin Pop
1151 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1152 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1153 a8083063 Iustin Pop

1154 a8083063 Iustin Pop
    """
1155 246e180a Iustin Pop
    nodenames = self.wanted
1156 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1157 a8083063 Iustin Pop
1158 a8083063 Iustin Pop
    # begin data gathering
1159 a8083063 Iustin Pop
1160 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1161 a8083063 Iustin Pop
      live_data = {}
1162 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1163 a8083063 Iustin Pop
      for name in nodenames:
1164 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1165 a8083063 Iustin Pop
        if nodeinfo:
1166 a8083063 Iustin Pop
          live_data[name] = {
1167 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1168 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1169 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1170 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1171 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1172 a8083063 Iustin Pop
            }
1173 a8083063 Iustin Pop
        else:
1174 a8083063 Iustin Pop
          live_data[name] = {}
1175 a8083063 Iustin Pop
    else:
1176 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1177 a8083063 Iustin Pop
1178 ec223efb Iustin Pop
    node_to_primary = dict([(name, set()) for name in nodenames])
1179 ec223efb Iustin Pop
    node_to_secondary = dict([(name, set()) for name in nodenames])
1180 a8083063 Iustin Pop
1181 ec223efb Iustin Pop
    inst_fields = frozenset(("pinst_cnt", "pinst_list",
1182 ec223efb Iustin Pop
                             "sinst_cnt", "sinst_list"))
1183 ec223efb Iustin Pop
    if inst_fields & frozenset(self.op.output_fields):
1184 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1185 a8083063 Iustin Pop
1186 ec223efb Iustin Pop
      for instance_name in instancelist:
1187 ec223efb Iustin Pop
        inst = self.cfg.GetInstanceInfo(instance_name)
1188 ec223efb Iustin Pop
        if inst.primary_node in node_to_primary:
1189 ec223efb Iustin Pop
          node_to_primary[inst.primary_node].add(inst.name)
1190 ec223efb Iustin Pop
        for secnode in inst.secondary_nodes:
1191 ec223efb Iustin Pop
          if secnode in node_to_secondary:
1192 ec223efb Iustin Pop
            node_to_secondary[secnode].add(inst.name)
1193 a8083063 Iustin Pop
1194 a8083063 Iustin Pop
    # end data gathering
1195 a8083063 Iustin Pop
1196 a8083063 Iustin Pop
    output = []
1197 a8083063 Iustin Pop
    for node in nodelist:
1198 a8083063 Iustin Pop
      node_output = []
1199 a8083063 Iustin Pop
      for field in self.op.output_fields:
1200 a8083063 Iustin Pop
        if field == "name":
1201 a8083063 Iustin Pop
          val = node.name
1202 ec223efb Iustin Pop
        elif field == "pinst_list":
1203 ec223efb Iustin Pop
          val = list(node_to_primary[node.name])
1204 ec223efb Iustin Pop
        elif field == "sinst_list":
1205 ec223efb Iustin Pop
          val = list(node_to_secondary[node.name])
1206 ec223efb Iustin Pop
        elif field == "pinst_cnt":
1207 ec223efb Iustin Pop
          val = len(node_to_primary[node.name])
1208 ec223efb Iustin Pop
        elif field == "sinst_cnt":
1209 ec223efb Iustin Pop
          val = len(node_to_secondary[node.name])
1210 a8083063 Iustin Pop
        elif field == "pip":
1211 a8083063 Iustin Pop
          val = node.primary_ip
1212 a8083063 Iustin Pop
        elif field == "sip":
1213 a8083063 Iustin Pop
          val = node.secondary_ip
1214 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1215 ec223efb Iustin Pop
          val = live_data[node.name].get(field, None)
1216 a8083063 Iustin Pop
        else:
1217 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1218 a8083063 Iustin Pop
        node_output.append(val)
1219 a8083063 Iustin Pop
      output.append(node_output)
1220 a8083063 Iustin Pop
1221 a8083063 Iustin Pop
    return output
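
# Illustrative sketch only: Exec above returns one row per node with values
# in the same order as op.output_fields; a caller would typically render the
# result like this (hypothetical helper).
def _ExamplePrintQueryRows(selected_fields, rows):
  """Print query rows as whitespace-separated columns.

  """
  logger.ToStdout(" ".join(selected_fields))
  for row in rows:
    logger.ToStdout(" ".join([str(val) for val in row]))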
1222 a8083063 Iustin Pop
1223 a8083063 Iustin Pop
1224 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1225 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1226 dcb93971 Michael Hanselmann

1227 dcb93971 Michael Hanselmann
  """
1228 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1229 dcb93971 Michael Hanselmann
1230 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1231 dcb93971 Michael Hanselmann
    """Check prerequisites.
1232 dcb93971 Michael Hanselmann

1233 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1234 dcb93971 Michael Hanselmann

1235 dcb93971 Michael Hanselmann
    """
1236 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1237 dcb93971 Michael Hanselmann
1238 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1239 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1240 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1241 dcb93971 Michael Hanselmann
1242 dcb93971 Michael Hanselmann
1243 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1244 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1245 dcb93971 Michael Hanselmann

1246 dcb93971 Michael Hanselmann
    """
1247 a7ba5e53 Iustin Pop
    nodenames = self.nodes
1248 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1249 dcb93971 Michael Hanselmann
1250 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1251 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1252 dcb93971 Michael Hanselmann
1253 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1254 dcb93971 Michael Hanselmann
1255 dcb93971 Michael Hanselmann
    output = []
1256 dcb93971 Michael Hanselmann
    for node in nodenames:
1257 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1258 37d19eb2 Michael Hanselmann
        continue
1259 37d19eb2 Michael Hanselmann
1260 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1261 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1262 dcb93971 Michael Hanselmann
1263 dcb93971 Michael Hanselmann
      for vol in node_vols:
1264 dcb93971 Michael Hanselmann
        node_output = []
1265 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1266 dcb93971 Michael Hanselmann
          if field == "node":
1267 dcb93971 Michael Hanselmann
            val = node
1268 dcb93971 Michael Hanselmann
          elif field == "phys":
1269 dcb93971 Michael Hanselmann
            val = vol['dev']
1270 dcb93971 Michael Hanselmann
          elif field == "vg":
1271 dcb93971 Michael Hanselmann
            val = vol['vg']
1272 dcb93971 Michael Hanselmann
          elif field == "name":
1273 dcb93971 Michael Hanselmann
            val = vol['name']
1274 dcb93971 Michael Hanselmann
          elif field == "size":
1275 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1276 dcb93971 Michael Hanselmann
          elif field == "instance":
1277 dcb93971 Michael Hanselmann
            for inst in ilist:
1278 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1279 dcb93971 Michael Hanselmann
                continue
1280 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1281 dcb93971 Michael Hanselmann
                val = inst.name
1282 dcb93971 Michael Hanselmann
                break
1283 dcb93971 Michael Hanselmann
            else:
1284 dcb93971 Michael Hanselmann
              val = '-'
1285 dcb93971 Michael Hanselmann
          else:
1286 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1287 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1288 dcb93971 Michael Hanselmann
1289 dcb93971 Michael Hanselmann
        output.append(node_output)
1290 dcb93971 Michael Hanselmann
1291 dcb93971 Michael Hanselmann
    return output
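
# Illustrative sketch only: the reverse lookup used in Exec above (volume to
# owning instance), assuming MapLVsByNode() returns a node -> [lv names]
# mapping as the loop implies; the helper name is hypothetical.
def _ExampleVolumeOwner(cfg, node, lv_name):
  """Return the name of the instance owning an LV on a node, or None.

  """
  for instance_name in cfg.GetInstanceList():
    inst = cfg.GetInstanceInfo(instance_name)
    node_lvs = inst.MapLVsByNode()
    if node in node_lvs and lv_name in node_lvs[node]:
      return inst.name
  return None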
1292 dcb93971 Michael Hanselmann
1293 dcb93971 Michael Hanselmann
1294 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1295 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1296 a8083063 Iustin Pop

1297 a8083063 Iustin Pop
  """
1298 a8083063 Iustin Pop
  HPATH = "node-add"
1299 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1300 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1301 a8083063 Iustin Pop
1302 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1303 a8083063 Iustin Pop
    """Build hooks env.
1304 a8083063 Iustin Pop

1305 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1306 a8083063 Iustin Pop

1307 a8083063 Iustin Pop
    """
1308 a8083063 Iustin Pop
    env = {
1309 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1310 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1311 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1312 a8083063 Iustin Pop
      }
1313 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1314 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1315 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1316 a8083063 Iustin Pop
1317 a8083063 Iustin Pop
  def CheckPrereq(self):
1318 a8083063 Iustin Pop
    """Check prerequisites.
1319 a8083063 Iustin Pop

1320 a8083063 Iustin Pop
    This checks:
1321 a8083063 Iustin Pop
     - the new node is not already in the config
1322 a8083063 Iustin Pop
     - it is resolvable
1323 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1324 a8083063 Iustin Pop

1325 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1326 a8083063 Iustin Pop

1327 a8083063 Iustin Pop
    """
1328 a8083063 Iustin Pop
    node_name = self.op.node_name
1329 a8083063 Iustin Pop
    cfg = self.cfg
1330 a8083063 Iustin Pop
1331 a8083063 Iustin Pop
    dns_data = utils.LookupHostname(node_name)
1332 a8083063 Iustin Pop
    if not dns_data:
1333 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node %s is not resolvable" % node_name)
1334 a8083063 Iustin Pop
1335 a8083063 Iustin Pop
    node = dns_data['hostname']
1336 a8083063 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data['ip']
1337 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1338 a8083063 Iustin Pop
    if secondary_ip is None:
1339 a8083063 Iustin Pop
      secondary_ip = primary_ip
1340 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1341 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1342 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1343 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1344 a8083063 Iustin Pop
    if node in node_list:
1345 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node %s is already in the configuration"
1346 3ecf6786 Iustin Pop
                                 % node)
1347 a8083063 Iustin Pop
1348 a8083063 Iustin Pop
    for existing_node_name in node_list:
1349 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1350 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1351 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1352 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1353 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1354 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1355 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1356 a8083063 Iustin Pop
1357 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1358 a8083063 Iustin Pop
    # same as for the master
1359 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1360 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1361 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1362 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1363 a8083063 Iustin Pop
      if master_singlehomed:
1364 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1365 3ecf6786 Iustin Pop
                                   " new node has one")
1366 a8083063 Iustin Pop
      else:
1367 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1368 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1369 a8083063 Iustin Pop
1370 a8083063 Iustin Pop
    # checks reachability
1371 a8083063 Iustin Pop
    command = ["fping", "-q", primary_ip]
1372 a8083063 Iustin Pop
    result = utils.RunCmd(command)
1373 a8083063 Iustin Pop
    if result.failed:
1374 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1375 a8083063 Iustin Pop
1376 a8083063 Iustin Pop
    if not newbie_singlehomed:
1377 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1378 a8083063 Iustin Pop
      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
1379 a8083063 Iustin Pop
      result = utils.RunCmd(command)
1380 a8083063 Iustin Pop
      if result.failed:
1381 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node secondary ip not reachable by ping")
1382 a8083063 Iustin Pop
1383 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1384 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1385 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1386 a8083063 Iustin Pop
1387 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1388 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1389 a8083063 Iustin Pop

1390 a8083063 Iustin Pop
    """
1391 a8083063 Iustin Pop
    new_node = self.new_node
1392 a8083063 Iustin Pop
    node = new_node.name
1393 a8083063 Iustin Pop
1394 a8083063 Iustin Pop
    # set up inter-node password and certificate and restarts the node daemon
1395 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1396 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1397 3ecf6786 Iustin Pop
      raise errors.OpExecError("ganeti password corruption detected")
1398 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1399 a8083063 Iustin Pop
    try:
1400 a8083063 Iustin Pop
      gntpem = f.read(8192)
1401 a8083063 Iustin Pop
    finally:
1402 a8083063 Iustin Pop
      f.close()
1403 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1404 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1405 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1406 a8083063 Iustin Pop
    # parsed by the shell sequence below
1407 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1408 3ecf6786 Iustin Pop
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
1409 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1410 3ecf6786 Iustin Pop
      raise errors.OpExecError("PEM must end with newline")
1411 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1412 a8083063 Iustin Pop
1413 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1414 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1415 a8083063 Iustin Pop
    # either by being constants or by the checks above
1416 a8083063 Iustin Pop
    ss = self.sstore
1417 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1418 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1419 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1420 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1421 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1422 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1423 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
1424 a8083063 Iustin Pop
1425 a8083063 Iustin Pop
    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
1426 a8083063 Iustin Pop
    if result.failed:
1427 3ecf6786 Iustin Pop
      raise errors.OpExecError("Remote command on node %s, error: %s,"
1428 3ecf6786 Iustin Pop
                               " output: %s" %
1429 3ecf6786 Iustin Pop
                               (node, result.fail_reason, result.output))
1430 a8083063 Iustin Pop
1431 a8083063 Iustin Pop
    # check connectivity
1432 a8083063 Iustin Pop
    time.sleep(4)
1433 a8083063 Iustin Pop
1434 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1435 a8083063 Iustin Pop
    if result:
1436 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1437 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1438 a8083063 Iustin Pop
                    (node, result))
1439 a8083063 Iustin Pop
      else:
1440 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1441 3ecf6786 Iustin Pop
                                 " node version %s" %
1442 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1443 a8083063 Iustin Pop
    else:
1444 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1445 a8083063 Iustin Pop
1446 a8083063 Iustin Pop
    # setup ssh on node
1447 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1448 a8083063 Iustin Pop
    keyarray = []
1449 a8083063 Iustin Pop
    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
1450 a8083063 Iustin Pop
                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
1451 a8083063 Iustin Pop
                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]
1452 a8083063 Iustin Pop
1453 a8083063 Iustin Pop
    for i in keyfiles:
1454 a8083063 Iustin Pop
      f = open(i, 'r')
1455 a8083063 Iustin Pop
      try:
1456 a8083063 Iustin Pop
        keyarray.append(f.read())
1457 a8083063 Iustin Pop
      finally:
1458 a8083063 Iustin Pop
        f.close()
1459 a8083063 Iustin Pop
1460 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1461 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1462 a8083063 Iustin Pop
1463 a8083063 Iustin Pop
    if not result:
1464 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1465 a8083063 Iustin Pop
1466 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1467 a8083063 Iustin Pop
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
1468 a8083063 Iustin Pop
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
1469 a8083063 Iustin Pop
                      self.cfg.GetHostKey())
1470 a8083063 Iustin Pop
1471 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1472 a8083063 Iustin Pop
      result = ssh.SSHCall(node, "root",
1473 a8083063 Iustin Pop
                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
1474 a8083063 Iustin Pop
      if result.failed:
1475 3ecf6786 Iustin Pop
        raise errors.OpExecError("Node claims it doesn't have the"
1476 3ecf6786 Iustin Pop
                                 " secondary ip you gave (%s).\n"
1477 3ecf6786 Iustin Pop
                                 "Please fix and re-run this command." %
1478 3ecf6786 Iustin Pop
                                 new_node.secondary_ip)
1479 a8083063 Iustin Pop
1480 ff98055b Iustin Pop
    success, msg = ssh.VerifyNodeHostname(node)
1481 ff98055b Iustin Pop
    if not success:
1482 ff98055b Iustin Pop
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
1483 ff98055b Iustin Pop
                               " than the one the resolver gives: %s.\n"
1484 ff98055b Iustin Pop
                               "Please fix and re-run this command." %
1485 ff98055b Iustin Pop
                               (node, msg))
1486 ff98055b Iustin Pop
1487 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1488 a8083063 Iustin Pop
    # including the node just added
1489 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1490 a8083063 Iustin Pop
    dist_nodes = self.cfg.GetNodeList() + [node]
1491 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1492 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1493 a8083063 Iustin Pop
1494 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1495 82122173 Iustin Pop
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
1496 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1497 a8083063 Iustin Pop
      for to_node in dist_nodes:
1498 a8083063 Iustin Pop
        if not result[to_node]:
1499 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1500 a8083063 Iustin Pop
                       (fname, to_node))
1501 a8083063 Iustin Pop
1502 cb91d46e Iustin Pop
    to_copy = ss.GetFileList()
1503 a8083063 Iustin Pop
    for fname in to_copy:
1504 a8083063 Iustin Pop
      if not ssh.CopyFileToNode(node, fname):
1505 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1506 a8083063 Iustin Pop
1507 a8083063 Iustin Pop
    logger.Info("adding node %s to cluster.conf" % node)
1508 a8083063 Iustin Pop
    self.cfg.AddNode(new_node)
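
# Illustrative sketch only: the single- versus dual-homed compatibility rule
# that CheckPrereq of LUAddNode enforces between the master and a candidate
# node, written as a standalone predicate (hypothetical helper).
def _ExampleSameHomingType(master_node, primary_ip, secondary_ip):
  """True if a candidate node's homing type matches the master's.

  """
  master_singlehomed = master_node.secondary_ip == master_node.primary_ip
  newbie_singlehomed = secondary_ip == primary_ip
  return master_singlehomed == newbie_singlehomed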
1509 a8083063 Iustin Pop
1510 a8083063 Iustin Pop
1511 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1512 a8083063 Iustin Pop
  """Failover the master node to the current node.
1513 a8083063 Iustin Pop

1514 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1515 a8083063 Iustin Pop

1516 a8083063 Iustin Pop
  """
1517 a8083063 Iustin Pop
  HPATH = "master-failover"
1518 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1519 a8083063 Iustin Pop
  REQ_MASTER = False
1520 a8083063 Iustin Pop
  _OP_REQP = []
1521 a8083063 Iustin Pop
1522 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1523 a8083063 Iustin Pop
    """Build hooks env.
1524 a8083063 Iustin Pop

1525 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1526 a8083063 Iustin Pop
    the nodes in the post phase.
1527 a8083063 Iustin Pop

1528 a8083063 Iustin Pop
    """
1529 a8083063 Iustin Pop
    env = {
1530 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1531 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1532 a8083063 Iustin Pop
      }
1533 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1534 a8083063 Iustin Pop
1535 a8083063 Iustin Pop
  def CheckPrereq(self):
1536 a8083063 Iustin Pop
    """Check prerequisites.
1537 a8083063 Iustin Pop

1538 a8083063 Iustin Pop
    This checks that we are not already the master.
1539 a8083063 Iustin Pop

1540 a8083063 Iustin Pop
    """
1541 a8083063 Iustin Pop
    self.new_master = socket.gethostname()
1542 a8083063 Iustin Pop
1543 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1544 a8083063 Iustin Pop
1545 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1546 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This commands must be run on the node"
1547 3ecf6786 Iustin Pop
                                 " where you want the new master to be.\n"
1548 3ecf6786 Iustin Pop
                                 "%s is already the master" %
1549 3ecf6786 Iustin Pop
                                 self.old_master)
1550 a8083063 Iustin Pop
1551 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1552 a8083063 Iustin Pop
    """Failover the master node.
1553 a8083063 Iustin Pop

1554 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1555 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1556 a8083063 Iustin Pop
    master.
1557 a8083063 Iustin Pop

1558 a8083063 Iustin Pop
    """
1559 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1560 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1561 a8083063 Iustin Pop
                (self.new_master, self.old_master))
1562 a8083063 Iustin Pop
1563 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1564 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1565 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1566 a8083063 Iustin Pop
1567 880478f8 Iustin Pop
    ss = self.sstore
1568 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1569 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1570 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1571 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1572 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1573 880478f8 Iustin Pop
1574 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1575 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1576 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1577 880478f8 Iustin Pop
      feedback_fn("Error in activating the master IP on the new master,\n"
1578 880478f8 Iustin Pop
                  "please fix manually.")
1579 a8083063 Iustin Pop
1580 a8083063 Iustin Pop
1581 a8083063 Iustin Pop
1582 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1583 a8083063 Iustin Pop
  """Query cluster configuration.
1584 a8083063 Iustin Pop

1585 a8083063 Iustin Pop
  """
1586 a8083063 Iustin Pop
  _OP_REQP = []
1587 59322403 Iustin Pop
  REQ_MASTER = False
1588 a8083063 Iustin Pop
1589 a8083063 Iustin Pop
  def CheckPrereq(self):
1590 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1591 a8083063 Iustin Pop

1592 a8083063 Iustin Pop
    """
1593 a8083063 Iustin Pop
    pass
1594 a8083063 Iustin Pop
1595 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1596 a8083063 Iustin Pop
    """Return cluster config.
1597 a8083063 Iustin Pop

1598 a8083063 Iustin Pop
    """
1599 a8083063 Iustin Pop
    result = {
1600 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1601 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1602 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1603 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1604 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1605 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1606 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1607 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1608 a8083063 Iustin Pop
      }
1609 a8083063 Iustin Pop
1610 a8083063 Iustin Pop
    return result
1611 a8083063 Iustin Pop
1612 a8083063 Iustin Pop
1613 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
1614 a8083063 Iustin Pop
  """Copy file to cluster.
1615 a8083063 Iustin Pop

1616 a8083063 Iustin Pop
  """
1617 a8083063 Iustin Pop
  _OP_REQP = ["nodes", "filename"]
1618 a8083063 Iustin Pop
1619 a8083063 Iustin Pop
  def CheckPrereq(self):
1620 a8083063 Iustin Pop
    """Check prerequisites.
1621 a8083063 Iustin Pop

1622 a8083063 Iustin Pop
    It should check that the named file exists and that the given list
1623 a8083063 Iustin Pop
    of nodes is valid.
1624 a8083063 Iustin Pop

1625 a8083063 Iustin Pop
    """
1626 a8083063 Iustin Pop
    if not os.path.exists(self.op.filename):
1627 a8083063 Iustin Pop
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
1628 dcb93971 Michael Hanselmann
1629 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1630 a8083063 Iustin Pop
1631 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1632 a8083063 Iustin Pop
    """Copy a file from master to some nodes.
1633 a8083063 Iustin Pop

1634 a8083063 Iustin Pop
    Args:
1635 a8083063 Iustin Pop
      feedback_fn - function used to report progress
1636 a8083063 Iustin Pop
    Opcode fields used:
1637 a8083063 Iustin Pop
      filename - the name of the file to copy
1638 a8083063 Iustin Pop
      nodes - list containing the names of target nodes; if empty, all nodes
1639 a8083063 Iustin Pop

1640 a8083063 Iustin Pop
    """
1641 a8083063 Iustin Pop
    filename = self.op.filename
1642 a8083063 Iustin Pop
1643 a8083063 Iustin Pop
    myname = socket.gethostname()
1644 a8083063 Iustin Pop
1645 a7ba5e53 Iustin Pop
    for node in self.nodes:
1646 a8083063 Iustin Pop
      if node == myname:
1647 a8083063 Iustin Pop
        continue
1648 a8083063 Iustin Pop
      if not ssh.CopyFileToNode(node, filename):
1649 a8083063 Iustin Pop
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
1650 a8083063 Iustin Pop
1651 a8083063 Iustin Pop
1652 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1653 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1654 a8083063 Iustin Pop

1655 a8083063 Iustin Pop
  """
1656 a8083063 Iustin Pop
  _OP_REQP = []
1657 a8083063 Iustin Pop
1658 a8083063 Iustin Pop
  def CheckPrereq(self):
1659 a8083063 Iustin Pop
    """No prerequisites.
1660 a8083063 Iustin Pop

1661 a8083063 Iustin Pop
    """
1662 a8083063 Iustin Pop
    pass
1663 a8083063 Iustin Pop
1664 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1665 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1666 a8083063 Iustin Pop

1667 a8083063 Iustin Pop
    """
1668 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1669 a8083063 Iustin Pop
1670 a8083063 Iustin Pop
1671 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
1672 a8083063 Iustin Pop
  """Run a command on some nodes.
1673 a8083063 Iustin Pop

1674 a8083063 Iustin Pop
  """
1675 a8083063 Iustin Pop
  _OP_REQP = ["command", "nodes"]
1676 a8083063 Iustin Pop
1677 a8083063 Iustin Pop
  def CheckPrereq(self):
1678 a8083063 Iustin Pop
    """Check prerequisites.
1679 a8083063 Iustin Pop

1680 a8083063 Iustin Pop
    It checks that the given list of nodes is valid.
1681 a8083063 Iustin Pop

1682 a8083063 Iustin Pop
    """
1683 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1684 a8083063 Iustin Pop
1685 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1686 a8083063 Iustin Pop
    """Run a command on some nodes.
1687 a8083063 Iustin Pop

1688 a8083063 Iustin Pop
    """
1689 a8083063 Iustin Pop
    data = []
1690 a8083063 Iustin Pop
    for node in self.nodes:
1691 a7ba5e53 Iustin Pop
      result = ssh.SSHCall(node, "root", self.op.command)
1692 a7ba5e53 Iustin Pop
      data.append((node, result.output, result.exit_code))
1693 a8083063 Iustin Pop
1694 a8083063 Iustin Pop
    return data
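
# Illustrative sketch only: consuming the (node, output, exit_code) tuples
# returned by Exec above (hypothetical helper).
def _ExampleReportCommandResults(results):
  """Log the per-node results of a cluster command run.

  """
  for node, output, exit_code in results:
    if exit_code != 0:
      logger.Error("node %s: command failed with exit code %s" %
                   (node, exit_code))
    else:
      logger.Info("node %s: %s" % (node, output))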
1695 a8083063 Iustin Pop
1696 a8083063 Iustin Pop
1697 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1698 a8083063 Iustin Pop
  """Bring up an instance's disks.
1699 a8083063 Iustin Pop

1700 a8083063 Iustin Pop
  """
1701 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1702 a8083063 Iustin Pop
1703 a8083063 Iustin Pop
  def CheckPrereq(self):
1704 a8083063 Iustin Pop
    """Check prerequisites.
1705 a8083063 Iustin Pop

1706 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1707 a8083063 Iustin Pop

1708 a8083063 Iustin Pop
    """
1709 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1710 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1711 a8083063 Iustin Pop
    if instance is None:
1712 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1713 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1714 a8083063 Iustin Pop
    self.instance = instance
1715 a8083063 Iustin Pop
1716 a8083063 Iustin Pop
1717 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1718 a8083063 Iustin Pop
    """Activate the disks.
1719 a8083063 Iustin Pop

1720 a8083063 Iustin Pop
    """
1721 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1722 a8083063 Iustin Pop
    if not disks_ok:
1723 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1724 a8083063 Iustin Pop
1725 a8083063 Iustin Pop
    return disks_info
1726 a8083063 Iustin Pop
1727 a8083063 Iustin Pop
1728 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1729 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1730 a8083063 Iustin Pop

1731 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1732 a8083063 Iustin Pop

1733 a8083063 Iustin Pop
  Args:
1734 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1735 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1736 a8083063 Iustin Pop
                        in an error return from the function
1737 a8083063 Iustin Pop

1738 a8083063 Iustin Pop
  Returns:
1739 a8083063 Iustin Pop
    a tuple (disks_ok, device_info); disks_ok is false if the operation
1740 a8083063 Iustin Pop
    failed, and device_info is a list of (host, instance_visible_name,
1741 a8083063 Iustin Pop
    node_visible_name) with the mapping from node devices to instance devices
1742 a8083063 Iustin Pop
  """
1743 a8083063 Iustin Pop
  device_info = []
1744 a8083063 Iustin Pop
  disks_ok = True
1745 a8083063 Iustin Pop
  for inst_disk in instance.disks:
1746 a8083063 Iustin Pop
    master_result = None
1747 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1748 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1749 a8083063 Iustin Pop
      is_primary = node == instance.primary_node
1750 a8083063 Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
1751 a8083063 Iustin Pop
      if not result:
1752 a8083063 Iustin Pop
        logger.Error("could not prepare block device %s on node %s (is_pri"
1753 a8083063 Iustin Pop
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
1754 a8083063 Iustin Pop
        if is_primary or not ignore_secondaries:
1755 a8083063 Iustin Pop
          disks_ok = False
1756 a8083063 Iustin Pop
      if is_primary:
1757 a8083063 Iustin Pop
        master_result = result
1758 a8083063 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
1759 a8083063 Iustin Pop
                        master_result))
1760 a8083063 Iustin Pop
1761 a8083063 Iustin Pop
  return disks_ok, device_info
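
# Illustrative sketch only: how the (disks_ok, device_info) pair returned
# above is typically consumed: abort on failure, otherwise report which node
# device each instance-visible disk maps to (hypothetical helper).
def _ExampleShowDiskMapping(instance, cfg):
  """Assemble an instance's disks and log the resulting device mapping.

  """
  disks_ok, device_info = _AssembleInstanceDisks(instance, cfg)
  if not disks_ok:
    raise errors.OpExecError("Cannot activate block devices")
  for node, iv_name, node_dev in device_info:
    logger.Info("disk %s of %s is %s on node %s" %
                (iv_name, instance.name, node_dev, node))
  return device_info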
1762 a8083063 Iustin Pop
1763 a8083063 Iustin Pop
1764 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1765 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1766 3ecf6786 Iustin Pop

1767 3ecf6786 Iustin Pop
  """
1768 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1769 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1770 fe7b0351 Michael Hanselmann
  if not disks_ok:
1771 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1772 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1773 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1774 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1775 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1776 fe7b0351 Michael Hanselmann
1777 fe7b0351 Michael Hanselmann
1778 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1779 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1780 a8083063 Iustin Pop

1781 a8083063 Iustin Pop
  """
1782 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1783 a8083063 Iustin Pop
1784 a8083063 Iustin Pop
  def CheckPrereq(self):
1785 a8083063 Iustin Pop
    """Check prerequisites.
1786 a8083063 Iustin Pop

1787 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1788 a8083063 Iustin Pop

1789 a8083063 Iustin Pop
    """
1790 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1791 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1792 a8083063 Iustin Pop
    if instance is None:
1793 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1794 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1795 a8083063 Iustin Pop
    self.instance = instance
1796 a8083063 Iustin Pop
1797 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1798 a8083063 Iustin Pop
    """Deactivate the disks
1799 a8083063 Iustin Pop

1800 a8083063 Iustin Pop
    """
1801 a8083063 Iustin Pop
    instance = self.instance
1802 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1803 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1804 a8083063 Iustin Pop
    if type(ins_l) is not list:
1805 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
1806 3ecf6786 Iustin Pop
                               instance.primary_node)
1807 a8083063 Iustin Pop
1808 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1809 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
1810 3ecf6786 Iustin Pop
                               " block devices.")
1811 a8083063 Iustin Pop
1812 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1813 a8083063 Iustin Pop
1814 a8083063 Iustin Pop
1815 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1816 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1817 a8083063 Iustin Pop

1818 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1819 a8083063 Iustin Pop

1820 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
1821 a8083063 Iustin Pop
  ignored.
1822 a8083063 Iustin Pop

1823 a8083063 Iustin Pop
  """
1824 a8083063 Iustin Pop
  result = True
1825 a8083063 Iustin Pop
  for disk in instance.disks:
1826 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1827 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1828 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1829 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1830 a8083063 Iustin Pop
                     (disk.iv_name, node))
1831 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
1832 a8083063 Iustin Pop
          result = False
1833 a8083063 Iustin Pop
  return result
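
# Illustrative sketch only: the assemble/work/shutdown pairing supported by
# _StartInstanceDisks and the helper above; the disks are always shut down
# again even if the work step fails (hypothetical helper).
def _ExampleWithInstanceDisks(cfg, instance, work_fn):
  """Run work_fn() with the instance's disks assembled, then clean up.

  """
  _StartInstanceDisks(cfg, instance, None)
  try:
    return work_fn()
  finally:
    _ShutdownInstanceDisks(instance, cfg)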
1834 a8083063 Iustin Pop
1835 a8083063 Iustin Pop
1836 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
1837 a8083063 Iustin Pop
  """Starts an instance.
1838 a8083063 Iustin Pop

1839 a8083063 Iustin Pop
  """
1840 a8083063 Iustin Pop
  HPATH = "instance-start"
1841 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1842 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
1843 a8083063 Iustin Pop
1844 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1845 a8083063 Iustin Pop
    """Build hooks env.
1846 a8083063 Iustin Pop

1847 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1848 a8083063 Iustin Pop

1849 a8083063 Iustin Pop
    """
1850 a8083063 Iustin Pop
    env = {
1851 a8083063 Iustin Pop
      "FORCE": self.op.force,
1852 a8083063 Iustin Pop
      }
1853 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
1854 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1855 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1856 a8083063 Iustin Pop
    return env, nl, nl
1857 a8083063 Iustin Pop
1858 a8083063 Iustin Pop
  def CheckPrereq(self):
1859 a8083063 Iustin Pop
    """Check prerequisites.
1860 a8083063 Iustin Pop

1861 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1862 a8083063 Iustin Pop

1863 a8083063 Iustin Pop
    """
1864 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1865 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1866 a8083063 Iustin Pop
    if instance is None:
1867 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1868 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1869 a8083063 Iustin Pop
1870 a8083063 Iustin Pop
    # check bridges existence
1871 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
1872 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
1873 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("one or more target bridges %s does not"
1874 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
1875 3ecf6786 Iustin Pop
                                 (brlist, instance.primary_node))
1876 a8083063 Iustin Pop
1877 a8083063 Iustin Pop
    self.instance = instance
1878 a8083063 Iustin Pop
    self.op.instance_name = instance.name
1879 a8083063 Iustin Pop
1880 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1881 a8083063 Iustin Pop
    """Start the instance.
1882 a8083063 Iustin Pop

1883 a8083063 Iustin Pop
    """
1884 a8083063 Iustin Pop
    instance = self.instance
1885 a8083063 Iustin Pop
    force = self.op.force
1886 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
1887 a8083063 Iustin Pop
1888 a8083063 Iustin Pop
    node_current = instance.primary_node
1889 a8083063 Iustin Pop
1890 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
1891 a8083063 Iustin Pop
    if not nodeinfo:
1892 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact node %s for infos" %
1893 3ecf6786 Iustin Pop
                               (node_current))
1894 a8083063 Iustin Pop
1895 a8083063 Iustin Pop
    freememory = nodeinfo[node_current]['memory_free']
1896 a8083063 Iustin Pop
    memory = instance.memory
1897 a8083063 Iustin Pop
    if memory > freememory:
1898 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to start instance"
1899 3ecf6786 Iustin Pop
                               " %s on node %s"
1900 3ecf6786 Iustin Pop
                               " needed %s MiB, available %s MiB" %
1901 3ecf6786 Iustin Pop
                               (instance.name, node_current, memory,
1902 3ecf6786 Iustin Pop
                                freememory))
1903 a8083063 Iustin Pop
1904 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
1905 a8083063 Iustin Pop
1906 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
1907 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
1908 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
1909 a8083063 Iustin Pop
1910 a8083063 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
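
# Illustrative sketch only: the free memory check Exec above performs before
# starting an instance, as a standalone predicate (hypothetical helper).
def _ExampleNodeHasMemoryFor(cfg, node, instance):
  """True if the node currently reports enough free memory for the instance.

  """
  nodeinfo = rpc.call_node_info([node], cfg.GetVGName())
  if not nodeinfo or node not in nodeinfo:
    raise errors.OpExecError("Could not contact node %s" % node)
  return instance.memory <= nodeinfo[node]['memory_free']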
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
1913 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
1914 a8083063 Iustin Pop
  """Shutdown an instance.
1915 a8083063 Iustin Pop

1916 a8083063 Iustin Pop
  """
1917 a8083063 Iustin Pop
  HPATH = "instance-stop"
1918 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1919 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1920 a8083063 Iustin Pop
1921 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1922 a8083063 Iustin Pop
    """Build hooks env.
1923 a8083063 Iustin Pop

1924 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1925 a8083063 Iustin Pop

1926 a8083063 Iustin Pop
    """
1927 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
1928 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1929 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1930 a8083063 Iustin Pop
    return env, nl, nl
1931 a8083063 Iustin Pop
1932 a8083063 Iustin Pop
  def CheckPrereq(self):
1933 a8083063 Iustin Pop
    """Check prerequisites.
1934 a8083063 Iustin Pop

1935 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1936 a8083063 Iustin Pop

1937 a8083063 Iustin Pop
    """
1938 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1939 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1940 a8083063 Iustin Pop
    if instance is None:
1941 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1942 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1943 a8083063 Iustin Pop
    self.instance = instance
1944 a8083063 Iustin Pop
1945 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1946 a8083063 Iustin Pop
    """Shutdown the instance.
1947 a8083063 Iustin Pop

1948 a8083063 Iustin Pop
    """
1949 a8083063 Iustin Pop
    instance = self.instance
1950 a8083063 Iustin Pop
    node_current = instance.primary_node
1951 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
1952 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
1953 a8083063 Iustin Pop
1954 a8083063 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
1955 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1956 a8083063 Iustin Pop
1957 a8083063 Iustin Pop
1958 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
1959 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
1960 fe7b0351 Michael Hanselmann

1961 fe7b0351 Michael Hanselmann
  """
1962 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
1963 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
1964 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
1965 fe7b0351 Michael Hanselmann
1966 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
1967 fe7b0351 Michael Hanselmann
    """Build hooks env.
1968 fe7b0351 Michael Hanselmann

1969 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
1970 fe7b0351 Michael Hanselmann

1971 fe7b0351 Michael Hanselmann
    """
1972 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
1973 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1974 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
1975 fe7b0351 Michael Hanselmann
    return env, nl, nl
1976 fe7b0351 Michael Hanselmann
1977 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
1978 fe7b0351 Michael Hanselmann
    """Check prerequisites.
1979 fe7b0351 Michael Hanselmann

1980 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
1981 fe7b0351 Michael Hanselmann

1982 fe7b0351 Michael Hanselmann
    """
1983 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
1984 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
1985 fe7b0351 Michael Hanselmann
    if instance is None:
1986 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1987 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1988 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
1989 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
1990 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1991 fe7b0351 Michael Hanselmann
    if instance.status != "down":
1992 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
1993 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1994 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
1995 fe7b0351 Michael Hanselmann
    if remote_info:
1996 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
1997 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
1998 3ecf6786 Iustin Pop
                                  instance.primary_node))
1999 d0834de3 Michael Hanselmann
2000 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2001 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2002 d0834de3 Michael Hanselmann
      # OS verification
2003 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2004 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2005 d0834de3 Michael Hanselmann
      if pnode is None:
2006 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2007 3ecf6786 Iustin Pop
                                   instance.primary_node)
2008 d0834de3 Michael Hanselmann
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2009 d0834de3 Michael Hanselmann
      if not isinstance(os_obj, objects.OS):
2010 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2011 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2012 d0834de3 Michael Hanselmann
2013 fe7b0351 Michael Hanselmann
    self.instance = instance
2014 fe7b0351 Michael Hanselmann
2015 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2016 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2017 fe7b0351 Michael Hanselmann

2018 fe7b0351 Michael Hanselmann
    """
2019 fe7b0351 Michael Hanselmann
    inst = self.instance
2020 fe7b0351 Michael Hanselmann
2021 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2022 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2023 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2024 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2025 d0834de3 Michael Hanselmann
2026 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2027 fe7b0351 Michael Hanselmann
    try:
2028 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2029 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2030 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not install OS for instance %s "
2031 3ecf6786 Iustin Pop
                                 "on node %s" %
2032 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2033 fe7b0351 Michael Hanselmann
    finally:
2034 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2035 fe7b0351 Michael Hanselmann
2036 fe7b0351 Michael Hanselmann
2037 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2038 a8083063 Iustin Pop
  """Remove an instance.
2039 a8083063 Iustin Pop

2040 a8083063 Iustin Pop
  """
2041 a8083063 Iustin Pop
  HPATH = "instance-remove"
2042 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2043 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2044 a8083063 Iustin Pop
2045 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2046 a8083063 Iustin Pop
    """Build hooks env.
2047 a8083063 Iustin Pop

2048 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2049 a8083063 Iustin Pop

2050 a8083063 Iustin Pop
    """
2051 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2052 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2053 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2054 a8083063 Iustin Pop
    return env, nl, nl
2055 a8083063 Iustin Pop
2056 a8083063 Iustin Pop
  def CheckPrereq(self):
2057 a8083063 Iustin Pop
    """Check prerequisites.
2058 a8083063 Iustin Pop

2059 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2060 a8083063 Iustin Pop

2061 a8083063 Iustin Pop
    """
2062 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2063 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2064 a8083063 Iustin Pop
    if instance is None:
2065 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2066 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2067 a8083063 Iustin Pop
    self.instance = instance
2068 a8083063 Iustin Pop
2069 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2070 a8083063 Iustin Pop
    """Remove the instance.
2071 a8083063 Iustin Pop

2072 a8083063 Iustin Pop
    """
2073 a8083063 Iustin Pop
    instance = self.instance
2074 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2075 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2076 a8083063 Iustin Pop
2077 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2078 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2079 3ecf6786 Iustin Pop
                               (instance.name, instance.primary_node))
2080 a8083063 Iustin Pop
2081 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2082 a8083063 Iustin Pop
2083 a8083063 Iustin Pop
    _RemoveDisks(instance, self.cfg)
2084 a8083063 Iustin Pop
2085 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2086 a8083063 Iustin Pop
2087 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2088 a8083063 Iustin Pop
2089 a8083063 Iustin Pop
2090 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2091 a8083063 Iustin Pop
  """Logical unit for querying instances.
2092 a8083063 Iustin Pop

2093 a8083063 Iustin Pop
  """
2094 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2095 a8083063 Iustin Pop
2096 a8083063 Iustin Pop
  def CheckPrereq(self):
2097 a8083063 Iustin Pop
    """Check prerequisites.
2098 a8083063 Iustin Pop

2099 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2100 a8083063 Iustin Pop

2101 a8083063 Iustin Pop
    """
2102 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
2103 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2104 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2105 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2106 644eeef9 Iustin Pop
                               "sda_size", "sdb_size"],
2107 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2108 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2109 a8083063 Iustin Pop
2110 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2111 069dcc86 Iustin Pop
2112 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2113 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2114 a8083063 Iustin Pop

2115 a8083063 Iustin Pop
    """
2116 069dcc86 Iustin Pop
    instance_names = self.wanted
2117 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2118 a8083063 Iustin Pop
                     in instance_names]
2119 a8083063 Iustin Pop
2120 a8083063 Iustin Pop
    # begin data gathering
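    # live_data maps instance name -> runtime info as reported by the
    # primary nodes; nodes that fail to answer are collected in bad_nodes,
    # and the dynamic fields (oper_state, oper_ram) of their instances are
    # later reported as None.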
2121 a8083063 Iustin Pop
2122 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2123 a8083063 Iustin Pop
2124 a8083063 Iustin Pop
    bad_nodes = []
2125 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2126 a8083063 Iustin Pop
      live_data = {}
2127 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2128 a8083063 Iustin Pop
      for name in nodes:
2129 a8083063 Iustin Pop
        result = node_data[name]
2130 a8083063 Iustin Pop
        if result:
2131 a8083063 Iustin Pop
          live_data.update(result)
2132 a8083063 Iustin Pop
        elif result == False:
2133 a8083063 Iustin Pop
          bad_nodes.append(name)
2134 a8083063 Iustin Pop
        # else no instance is alive
2135 a8083063 Iustin Pop
    else:
2136 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2137 a8083063 Iustin Pop
2138 a8083063 Iustin Pop
    # end data gathering
2139 a8083063 Iustin Pop
2140 a8083063 Iustin Pop
    output = []
2141 a8083063 Iustin Pop
    for instance in instance_list:
2142 a8083063 Iustin Pop
      iout = []
2143 a8083063 Iustin Pop
      for field in self.op.output_fields:
2144 a8083063 Iustin Pop
        if field == "name":
2145 a8083063 Iustin Pop
          val = instance.name
2146 a8083063 Iustin Pop
        elif field == "os":
2147 a8083063 Iustin Pop
          val = instance.os
2148 a8083063 Iustin Pop
        elif field == "pnode":
2149 a8083063 Iustin Pop
          val = instance.primary_node
2150 a8083063 Iustin Pop
        elif field == "snodes":
2151 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2152 a8083063 Iustin Pop
        elif field == "admin_state":
2153 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2154 a8083063 Iustin Pop
        elif field == "oper_state":
2155 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2156 8a23d2d3 Iustin Pop
            val = None
2157 a8083063 Iustin Pop
          else:
2158 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2159 a8083063 Iustin Pop
        elif field == "admin_ram":
2160 a8083063 Iustin Pop
          val = instance.memory
2161 a8083063 Iustin Pop
        elif field == "oper_ram":
2162 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2163 8a23d2d3 Iustin Pop
            val = None
2164 a8083063 Iustin Pop
          elif instance.name in live_data:
2165 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2166 a8083063 Iustin Pop
          else:
2167 a8083063 Iustin Pop
            val = "-"
2168 a8083063 Iustin Pop
        elif field == "disk_template":
2169 a8083063 Iustin Pop
          val = instance.disk_template
2170 a8083063 Iustin Pop
        elif field == "ip":
2171 a8083063 Iustin Pop
          val = instance.nics[0].ip
2172 a8083063 Iustin Pop
        elif field == "bridge":
2173 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2174 a8083063 Iustin Pop
        elif field == "mac":
2175 a8083063 Iustin Pop
          val = instance.nics[0].mac
2176 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2177 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2178 644eeef9 Iustin Pop
          if disk is None:
2179 8a23d2d3 Iustin Pop
            val = None
2180 644eeef9 Iustin Pop
          else:
2181 644eeef9 Iustin Pop
            val = disk.size
2182 a8083063 Iustin Pop
        else:
2183 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2184 a8083063 Iustin Pop
        iout.append(val)
2185 a8083063 Iustin Pop
      output.append(iout)
2186 a8083063 Iustin Pop
2187 a8083063 Iustin Pop
    return output
2188 a8083063 Iustin Pop
2189 a8083063 Iustin Pop
2190 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2191 a8083063 Iustin Pop
  """Failover an instance.
2192 a8083063 Iustin Pop

2193 a8083063 Iustin Pop
  """
2194 a8083063 Iustin Pop
  HPATH = "instance-failover"
2195 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2196 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2197 a8083063 Iustin Pop
2198 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2199 a8083063 Iustin Pop
    """Build hooks env.
2200 a8083063 Iustin Pop

2201 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2202 a8083063 Iustin Pop

2203 a8083063 Iustin Pop
    """
2204 a8083063 Iustin Pop
    env = {
2205 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2206 a8083063 Iustin Pop
      }
2207 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2208 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2209 a8083063 Iustin Pop
    return env, nl, nl
2210 a8083063 Iustin Pop
2211 a8083063 Iustin Pop
  def CheckPrereq(self):
2212 a8083063 Iustin Pop
    """Check prerequisites.
2213 a8083063 Iustin Pop

2214 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2215 a8083063 Iustin Pop

2216 a8083063 Iustin Pop
    """
2217 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2218 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2219 a8083063 Iustin Pop
    if instance is None:
2220 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2221 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2222 a8083063 Iustin Pop
2223 2a710df1 Michael Hanselmann
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2224 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2225 2a710df1 Michael Hanselmann
                                 " remote_raid1.")
2226 2a710df1 Michael Hanselmann
2227 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2228 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2229 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2230 2a710df1 Michael Hanselmann
                                   "DT_REMOTE_RAID1 template")
2231 2a710df1 Michael Hanselmann
2232 3a7c308e Guido Trotter
    # check memory requirements on the secondary node
2233 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2234 3a7c308e Guido Trotter
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2235 3a7c308e Guido Trotter
    info = nodeinfo.get(target_node, None)
2236 3a7c308e Guido Trotter
    if not info:
2237 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
2238 3ecf6786 Iustin Pop
                                 " from node '%s'" % nodeinfo)
2239 3a7c308e Guido Trotter
    if instance.memory > info['memory_free']:
2240 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Not enough memory on target node %s."
2241 3ecf6786 Iustin Pop
                                 " %d MB available, %d MB required" %
2242 3ecf6786 Iustin Pop
                                 (target_node, info['memory_free'],
2243 3ecf6786 Iustin Pop
                                  instance.memory))
2244 3a7c308e Guido Trotter
2245 a8083063 Iustin Pop
    # check bridge existence
2246 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2247 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
2248 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2249 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2250 3ecf6786 Iustin Pop
                                 (brlist, instance.primary_node))
2251 a8083063 Iustin Pop
2252 a8083063 Iustin Pop
    self.instance = instance
2253 a8083063 Iustin Pop
2254 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2255 a8083063 Iustin Pop
    """Failover an instance.
2256 a8083063 Iustin Pop

2257 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2258 a8083063 Iustin Pop
    starting it on the secondary.
2259 a8083063 Iustin Pop

2260 a8083063 Iustin Pop
    """
2261 a8083063 Iustin Pop
    instance = self.instance
2262 a8083063 Iustin Pop
2263 a8083063 Iustin Pop
    source_node = instance.primary_node
2264 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2265 a8083063 Iustin Pop
2266 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2267 a8083063 Iustin Pop
    for dev in instance.disks:
2268 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2269 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2270 a8083063 Iustin Pop
        if not self.op.ignore_consistency:
2271 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2272 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2273 a8083063 Iustin Pop
2274 a8083063 Iustin Pop
    feedback_fn("* checking target node resource availability")
2275 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2276 a8083063 Iustin Pop
2277 a8083063 Iustin Pop
    if not nodeinfo:
2278 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact target node %s." %
2279 3ecf6786 Iustin Pop
                               target_node)
2280 a8083063 Iustin Pop
2281 a8083063 Iustin Pop
    free_memory = int(nodeinfo[target_node]['memory_free'])
2282 a8083063 Iustin Pop
    memory = instance.memory
2283 a8083063 Iustin Pop
    if memory > free_memory:
2284 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to create instance %s on"
2285 3ecf6786 Iustin Pop
                               " node %s. needed %s MiB, available %s MiB" %
2286 3ecf6786 Iustin Pop
                               (instance.name, target_node, memory,
2287 3ecf6786 Iustin Pop
                                free_memory))
2288 a8083063 Iustin Pop
2289 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2290 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2291 a8083063 Iustin Pop
                (instance.name, source_node))
2292 a8083063 Iustin Pop
2293 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2294 a8083063 Iustin Pop
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2295 a8083063 Iustin Pop
                   " anyway. Please make sure node %s is down"  %
2296 a8083063 Iustin Pop
                   (instance.name, source_node, source_node))
2297 a8083063 Iustin Pop
2298 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2299 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2300 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2301 a8083063 Iustin Pop
2302 a8083063 Iustin Pop
    instance.primary_node = target_node
2303 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2304 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2305 a8083063 Iustin Pop
2306 a8083063 Iustin Pop
    feedback_fn("* activating the instance's disks on target node")
2307 a8083063 Iustin Pop
    logger.Info("Starting instance %s on node %s" %
2308 a8083063 Iustin Pop
                (instance.name, target_node))
2309 a8083063 Iustin Pop
2310 a8083063 Iustin Pop
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2311 a8083063 Iustin Pop
                                             ignore_secondaries=True)
2312 a8083063 Iustin Pop
    if not disks_ok:
2313 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2314 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't activate the instance's disks")
2315 a8083063 Iustin Pop
2316 a8083063 Iustin Pop
    feedback_fn("* starting the instance on the target node")
2317 a8083063 Iustin Pop
    if not rpc.call_instance_start(target_node, instance, None):
2318 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2319 a8083063 Iustin Pop
      raise errors.OpExecError("Could not start instance %s on node %s." %
2320 d0b3526f Michael Hanselmann
                               (instance.name, target_node))
2321 a8083063 Iustin Pop
2322 a8083063 Iustin Pop
2323 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnPrimary(cfg, node, device, info):
2324 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2325 a8083063 Iustin Pop

2326 a8083063 Iustin Pop
  This always creates all devices.
2327 a8083063 Iustin Pop

2328 a8083063 Iustin Pop
  """
2329 a8083063 Iustin Pop
  if device.children:
2330 a8083063 Iustin Pop
    for child in device.children:
2331 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, node, child, info):
2332 a8083063 Iustin Pop
        return False
2333 a8083063 Iustin Pop
2334 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2335 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
2336 a8083063 Iustin Pop
  if not new_id:
2337 a8083063 Iustin Pop
    return False
2338 a8083063 Iustin Pop
  if device.physical_id is None:
2339 a8083063 Iustin Pop
    device.physical_id = new_id
2340 a8083063 Iustin Pop
  return True
2341 a8083063 Iustin Pop
2342 a8083063 Iustin Pop
2343 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
2344 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2345 a8083063 Iustin Pop

2346 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2347 a8083063 Iustin Pop
  all its children.
2348 a8083063 Iustin Pop

2349 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2350 a8083063 Iustin Pop

2351 a8083063 Iustin Pop
  """
2352 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2353 a8083063 Iustin Pop
    force = True
2354 a8083063 Iustin Pop
  if device.children:
2355 a8083063 Iustin Pop
    for child in device.children:
2356 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, node, child, force, info):
2357 a8083063 Iustin Pop
        return False
2358 a8083063 Iustin Pop
2359 a8083063 Iustin Pop
  if not force:
2360 a8083063 Iustin Pop
    return True
2361 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2362 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
2363 a8083063 Iustin Pop
  if not new_id:
2364 a8083063 Iustin Pop
    return False
2365 a8083063 Iustin Pop
  if device.physical_id is None:
2366 a8083063 Iustin Pop
    device.physical_id = new_id
2367 a8083063 Iustin Pop
  return True
2368 a8083063 Iustin Pop
2369 a8083063 Iustin Pop
2370 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2371 923b1523 Iustin Pop
  """Generate a suitable LV name.
2372 923b1523 Iustin Pop

2373 923b1523 Iustin Pop
  This will generate unique logical volume names, one for each given extension.
2374 923b1523 Iustin Pop

2375 923b1523 Iustin Pop
  """
2376 923b1523 Iustin Pop
  results = []
2377 923b1523 Iustin Pop
  for val in exts:
2378 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2379 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2380 923b1523 Iustin Pop
  return results
2381 923b1523 Iustin Pop
2382 923b1523 Iustin Pop
2383 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
2384 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
2385 a8083063 Iustin Pop

2386 a8083063 Iustin Pop
  """
2387 a8083063 Iustin Pop
  port = cfg.AllocatePort()
2388 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2389 a8083063 Iustin Pop
  dev_data = objects.Disk(dev_type="lvm", size=size,
2390 923b1523 Iustin Pop
                          logical_id=(vgname, names[0]))
2391 a8083063 Iustin Pop
  dev_meta = objects.Disk(dev_type="lvm", size=128,
2392 923b1523 Iustin Pop
                          logical_id=(vgname, names[1]))
2393 a8083063 Iustin Pop
  drbd_dev = objects.Disk(dev_type="drbd", size=size,
2394 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
2395 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
2396 a8083063 Iustin Pop
  return drbd_dev
2397 a8083063 Iustin Pop
2398 a8083063 Iustin Pop
2399 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2400 a8083063 Iustin Pop
                          instance_name, primary_node,
2401 a8083063 Iustin Pop
                          secondary_nodes, disk_sz, swap_sz):
2402 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2403 a8083063 Iustin Pop

2404 a8083063 Iustin Pop
  """
2405 a8083063 Iustin Pop
  #TODO: compute space requirements
2406 a8083063 Iustin Pop
2407 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2408 a8083063 Iustin Pop
  if template_name == "diskless":
2409 a8083063 Iustin Pop
    disks = []
2410 a8083063 Iustin Pop
  elif template_name == "plain":
2411 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2412 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2413 923b1523 Iustin Pop
2414 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2415 a8083063 Iustin Pop
    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
2416 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2417 a8083063 Iustin Pop
                           iv_name = "sda")
2418 a8083063 Iustin Pop
    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
2419 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2420 a8083063 Iustin Pop
                           iv_name = "sdb")
2421 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2422 a8083063 Iustin Pop
  elif template_name == "local_raid1":
2423 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2424 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2425 923b1523 Iustin Pop
2426 923b1523 Iustin Pop
2427 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
2428 923b1523 Iustin Pop
                                       ".sdb_m1", ".sdb_m2"])
2429 a8083063 Iustin Pop
    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
2430 923b1523 Iustin Pop
                              logical_id=(vgname, names[0]))
2431 a8083063 Iustin Pop
    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
2432 923b1523 Iustin Pop
                              logical_id=(vgname, names[1]))
2433 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
2434 a8083063 Iustin Pop
                              size=disk_sz,
2435 a8083063 Iustin Pop
                              children = [sda_dev_m1, sda_dev_m2])
2436 a8083063 Iustin Pop
    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
2437 923b1523 Iustin Pop
                              logical_id=(vgname, names[2]))
2438 a8083063 Iustin Pop
    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
2439 923b1523 Iustin Pop
                              logical_id=(vgname, names[3]))
2440 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
2441 a8083063 Iustin Pop
                              size=swap_sz,
2442 a8083063 Iustin Pop
                              children = [sdb_dev_m1, sdb_dev_m2])
2443 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2444 2a710df1 Michael Hanselmann
  elif template_name == constants.DT_REMOTE_RAID1:
2445 a8083063 Iustin Pop
    if len(secondary_nodes) != 1:
2446 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2447 a8083063 Iustin Pop
    remote_node = secondary_nodes[0]
2448 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2449 923b1523 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2450 923b1523 Iustin Pop
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2451 923b1523 Iustin Pop
                                         disk_sz, names[0:2])
2452 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
2453 a8083063 Iustin Pop
                              children = [drbd_sda_dev], size=disk_sz)
2454 923b1523 Iustin Pop
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2455 923b1523 Iustin Pop
                                         swap_sz, names[2:4])
2456 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
2457 a8083063 Iustin Pop
                              children = [drbd_sdb_dev], size=swap_sz)
2458 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2459 a8083063 Iustin Pop
  else:
2460 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2461 a8083063 Iustin Pop
  return disks
2462 a8083063 Iustin Pop
2463 a8083063 Iustin Pop
2464 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2465 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2466 3ecf6786 Iustin Pop

2467 3ecf6786 Iustin Pop
  """
2468 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2469 a0c3fea1 Michael Hanselmann
2470 a0c3fea1 Michael Hanselmann
2471 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2472 a8083063 Iustin Pop
  """Create all disks for an instance.
2473 a8083063 Iustin Pop

2474 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2475 a8083063 Iustin Pop

2476 a8083063 Iustin Pop
  Args:
2477 a8083063 Iustin Pop
    instance: the instance object
2478 a8083063 Iustin Pop

2479 a8083063 Iustin Pop
  Returns:
2480 a8083063 Iustin Pop
    True or False showing the success of the creation process
2481 a8083063 Iustin Pop

2482 a8083063 Iustin Pop
  """
2483 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2484 a0c3fea1 Michael Hanselmann
2485 a8083063 Iustin Pop
  for device in instance.disks:
2486 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2487 a8083063 Iustin Pop
                (device.iv_name, instance.name))
2488 a8083063 Iustin Pop
    #HARDCODE
2489 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2490 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False,
2491 a0c3fea1 Michael Hanselmann
                                        info):
2492 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2493 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2494 a8083063 Iustin Pop
        return False
2495 a8083063 Iustin Pop
    #HARDCODE
2496 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info):
2497 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2498 a8083063 Iustin Pop
                   device.iv_name)
2499 a8083063 Iustin Pop
      return False
2500 a8083063 Iustin Pop
  return True
2501 a8083063 Iustin Pop
2502 a8083063 Iustin Pop
2503 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2504 a8083063 Iustin Pop
  """Remove all disks for an instance.
2505 a8083063 Iustin Pop

2506 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2507 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2508 a8083063 Iustin Pop
  be removed, the removal will continue with the other ones (compare
2509 a8083063 Iustin Pop
  with `_CreateDisks()`).
2510 a8083063 Iustin Pop

2511 a8083063 Iustin Pop
  Args:
2512 a8083063 Iustin Pop
    instance: the instance object
2513 a8083063 Iustin Pop

2514 a8083063 Iustin Pop
  Returns:
2515 a8083063 Iustin Pop
    True or False showing the success of the removal process
2516 a8083063 Iustin Pop

2517 a8083063 Iustin Pop
  """
2518 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2519 a8083063 Iustin Pop
2520 a8083063 Iustin Pop
  result = True
2521 a8083063 Iustin Pop
  for device in instance.disks:
2522 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2523 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2524 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2525 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2526 a8083063 Iustin Pop
                     " continuing anyway" %
2527 a8083063 Iustin Pop
                     (device.iv_name, node))
2528 a8083063 Iustin Pop
        result = False
2529 a8083063 Iustin Pop
  return result
2530 a8083063 Iustin Pop
2531 a8083063 Iustin Pop
2532 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2533 a8083063 Iustin Pop
  """Create an instance.
2534 a8083063 Iustin Pop

2535 a8083063 Iustin Pop
  """
2536 a8083063 Iustin Pop
  HPATH = "instance-add"
2537 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2538 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
2539 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2540 a8083063 Iustin Pop
              "wait_for_sync"]
2541 a8083063 Iustin Pop
2542 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2543 a8083063 Iustin Pop
    """Build hooks env.
2544 a8083063 Iustin Pop

2545 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2546 a8083063 Iustin Pop

2547 a8083063 Iustin Pop
    """
2548 a8083063 Iustin Pop
    env = {
2549 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2550 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2551 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2552 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2553 a8083063 Iustin Pop
      }
2554 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2555 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2556 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2557 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2558 396e1b78 Michael Hanselmann
2559 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
2560 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
2561 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
2562 396e1b78 Michael Hanselmann
      status=self.instance_status,
2563 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
2564 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
2565 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
2566 396e1b78 Michael Hanselmann
      nics=[(self.inst_ip, self.op.bridge)],
2567 396e1b78 Michael Hanselmann
    ))
2568 a8083063 Iustin Pop
2569 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2570 a8083063 Iustin Pop
          self.secondaries)
2571 a8083063 Iustin Pop
    return env, nl, nl
2572 a8083063 Iustin Pop
2573 a8083063 Iustin Pop
2574 a8083063 Iustin Pop
  def CheckPrereq(self):
2575 a8083063 Iustin Pop
    """Check prerequisites.
2576 a8083063 Iustin Pop

2577 a8083063 Iustin Pop
    """
2578 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2579 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2580 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
2581 3ecf6786 Iustin Pop
                                 self.op.mode)
2582 a8083063 Iustin Pop
2583 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2584 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2585 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2586 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2587 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
2588 3ecf6786 Iustin Pop
                                   " node and path options")
2589 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2590 a8083063 Iustin Pop
      if src_node_full is None:
2591 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
2592 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2593 a8083063 Iustin Pop
2594 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2595 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
2596 a8083063 Iustin Pop
2597 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2598 a8083063 Iustin Pop
2599 a8083063 Iustin Pop
      if not export_info:
2600 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
2601 a8083063 Iustin Pop
2602 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2603 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
2604 a8083063 Iustin Pop
2605 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
2606 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
2607 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
2608 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
2609 a8083063 Iustin Pop
2610 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
2611 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
2612 3ecf6786 Iustin Pop
                                   " one data disk")
2613 a8083063 Iustin Pop
2614 a8083063 Iustin Pop
      # FIXME: are the old OSes, disk sizes, etc. useful?
2615 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
2616 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
2617 a8083063 Iustin Pop
                                                         'disk0_dump'))
2618 a8083063 Iustin Pop
      self.src_image = diskimage
2619 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
2620 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
2621 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
2622 a8083063 Iustin Pop
2623 a8083063 Iustin Pop
    # check primary node
2624 a8083063 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
2625 a8083063 Iustin Pop
    if pnode is None:
2626 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
2627 3ecf6786 Iustin Pop
                                 self.op.pnode)
2628 a8083063 Iustin Pop
    self.op.pnode = pnode.name
2629 a8083063 Iustin Pop
    self.pnode = pnode
2630 a8083063 Iustin Pop
    self.secondaries = []
2631 a8083063 Iustin Pop
    # disk template and mirror node verification
2632 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
2633 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
2634 a8083063 Iustin Pop
2635 a8083063 Iustin Pop
    if self.op.disk_template == constants.DT_REMOTE_RAID1:
2636 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
2637 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
2638 3ecf6786 Iustin Pop
                                   " a mirror node")
2639 a8083063 Iustin Pop
2640 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
2641 a8083063 Iustin Pop
      if snode_name is None:
2642 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
2643 3ecf6786 Iustin Pop
                                   self.op.snode)
2644 a8083063 Iustin Pop
      elif snode_name == pnode.name:
2645 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
2646 3ecf6786 Iustin Pop
                                   " the primary node.")
2647 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
2648 a8083063 Iustin Pop
2649 ed1ebc60 Guido Trotter
    # Check lv size requirements
2650 ed1ebc60 Guido Trotter
    nodenames = [pnode.name] + self.secondaries
2651 ed1ebc60 Guido Trotter
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
2652 ed1ebc60 Guido Trotter
2653 ed1ebc60 Guido Trotter
    # Required free disk space as a function of disk and swap space
2654 ed1ebc60 Guido Trotter
    req_size_dict = {
2655 ed1ebc60 Guido Trotter
      constants.DT_DISKLESS: 0,
2656 ed1ebc60 Guido Trotter
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
2657 ed1ebc60 Guido Trotter
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
2658 ed1ebc60 Guido Trotter
      # 256 MB are added for drbd metadata, 128MB for each drbd device
2659 ed1ebc60 Guido Trotter
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
2660 ed1ebc60 Guido Trotter
    }
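    # As a worked example: a remote_raid1 instance with a 10240 MB disk and
    # a 4096 MB swap needs 10240 + 4096 + 256 = 14592 MB of free space in
    # the volume group on the primary and on the secondary node.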
2661 ed1ebc60 Guido Trotter
2662 ed1ebc60 Guido Trotter
    if self.op.disk_template not in req_size_dict:
2663 3ecf6786 Iustin Pop
      raise errors.ProgrammerError("Disk template '%s' size requirement"
2664 3ecf6786 Iustin Pop
                                   " is unknown" %  self.op.disk_template)
2665 ed1ebc60 Guido Trotter
2666 ed1ebc60 Guido Trotter
    req_size = req_size_dict[self.op.disk_template]
2667 ed1ebc60 Guido Trotter
2668 ed1ebc60 Guido Trotter
    for node in nodenames:
2669 ed1ebc60 Guido Trotter
      info = nodeinfo.get(node, None)
2670 ed1ebc60 Guido Trotter
      if not info:
2671 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
2672 3ecf6786 Iustin Pop
                                   " from node '%s'" % nodeinfo)
2673 ed1ebc60 Guido Trotter
      if req_size > info['vg_free']:
2674 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s."
2675 3ecf6786 Iustin Pop
                                   " %d MB available, %d MB required" %
2676 3ecf6786 Iustin Pop
                                   (node, info['vg_free'], req_size))
2677 ed1ebc60 Guido Trotter
2678 a8083063 Iustin Pop
    # os verification
2679 a8083063 Iustin Pop
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2680 a8083063 Iustin Pop
    if not isinstance(os_obj, objects.OS):
2681 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
2682 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
2683 a8083063 Iustin Pop
2684 a8083063 Iustin Pop
    # instance verification
2685 a8083063 Iustin Pop
    hostname1 = utils.LookupHostname(self.op.instance_name)
2686 a8083063 Iustin Pop
    if not hostname1:
2687 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance name '%s' not found in dns" %
2688 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2689 a8083063 Iustin Pop
2690 a8083063 Iustin Pop
    self.op.instance_name = instance_name = hostname1['hostname']
2691 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2692 a8083063 Iustin Pop
    if instance_name in instance_list:
2693 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2694 3ecf6786 Iustin Pop
                                 instance_name)
2695 a8083063 Iustin Pop
2696 a8083063 Iustin Pop
    ip = getattr(self.op, "ip", None)
2697 a8083063 Iustin Pop
    if ip is None or ip.lower() == "none":
2698 a8083063 Iustin Pop
      inst_ip = None
2699 a8083063 Iustin Pop
    elif ip.lower() == "auto":
2700 a8083063 Iustin Pop
      inst_ip = hostname1['ip']
2701 a8083063 Iustin Pop
    else:
2702 a8083063 Iustin Pop
      if not utils.IsValidIP(ip):
2703 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
2704 3ecf6786 Iustin Pop
                                   " like a valid IP" % ip)
2705 a8083063 Iustin Pop
      inst_ip = ip
2706 a8083063 Iustin Pop
    self.inst_ip = inst_ip
2707 a8083063 Iustin Pop
2708 a8083063 Iustin Pop
    command = ["fping", "-q", hostname1['ip']]
2709 a8083063 Iustin Pop
    result = utils.RunCmd(command)
2710 a8083063 Iustin Pop
    if not result.failed:
2711 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
2712 3ecf6786 Iustin Pop
                                 (hostname1['ip'], instance_name))
2713 a8083063 Iustin Pop
2714 a8083063 Iustin Pop
    # bridge verification
2715 a8083063 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
2716 a8083063 Iustin Pop
    if bridge is None:
2717 a8083063 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
2718 a8083063 Iustin Pop
    else:
2719 a8083063 Iustin Pop
      self.op.bridge = bridge
2720 a8083063 Iustin Pop
2721 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
2722 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
2723 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
2724 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
2725 a8083063 Iustin Pop
2726 a8083063 Iustin Pop
    if self.op.start:
2727 a8083063 Iustin Pop
      self.instance_status = 'up'
2728 a8083063 Iustin Pop
    else:
2729 a8083063 Iustin Pop
      self.instance_status = 'down'
2730 a8083063 Iustin Pop
2731 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2732 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
2733 a8083063 Iustin Pop

2734 a8083063 Iustin Pop
    """
2735 a8083063 Iustin Pop
    instance = self.op.instance_name
2736 a8083063 Iustin Pop
    pnode_name = self.pnode.name
2737 a8083063 Iustin Pop
2738 a8083063 Iustin Pop
    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
2739 a8083063 Iustin Pop
    if self.inst_ip is not None:
2740 a8083063 Iustin Pop
      nic.ip = self.inst_ip
2741 a8083063 Iustin Pop
2742 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
2743 a8083063 Iustin Pop
                                  self.op.disk_template,
2744 a8083063 Iustin Pop
                                  instance, pnode_name,
2745 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
2746 a8083063 Iustin Pop
                                  self.op.swap_size)
2747 a8083063 Iustin Pop
2748 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
2749 a8083063 Iustin Pop
                            primary_node=pnode_name,
2750 a8083063 Iustin Pop
                            memory=self.op.mem_size,
2751 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
2752 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
2753 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
2754 a8083063 Iustin Pop
                            status=self.instance_status,
2755 a8083063 Iustin Pop
                            )
2756 a8083063 Iustin Pop
2757 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
2758 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
2759 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2760 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
2761 a8083063 Iustin Pop
2762 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
2763 a8083063 Iustin Pop
2764 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
2765 a8083063 Iustin Pop
2766 a8083063 Iustin Pop
    if self.op.wait_for_sync:
2767 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj)
2768 2a710df1 Michael Hanselmann
    elif iobj.disk_template == constants.DT_REMOTE_RAID1:
2769 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
2770 a8083063 Iustin Pop
      time.sleep(15)
2771 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
2772 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
2773 a8083063 Iustin Pop
    else:
2774 a8083063 Iustin Pop
      disk_abort = False
2775 a8083063 Iustin Pop
2776 a8083063 Iustin Pop
    if disk_abort:
2777 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2778 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
2779 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
2780 3ecf6786 Iustin Pop
                               " this instance")
2781 a8083063 Iustin Pop
2782 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
2783 a8083063 Iustin Pop
                (instance, pnode_name))
2784 a8083063 Iustin Pop
2785 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
2786 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
2787 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
2788 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
2789 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
2790 3ecf6786 Iustin Pop
                                   " on node %s" %
2791 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
2792 a8083063 Iustin Pop
2793 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
2794 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
2795 a8083063 Iustin Pop
        src_node = self.op.src_node
2796 a8083063 Iustin Pop
        src_image = self.src_image
2797 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
2798 a8083063 Iustin Pop
                                                src_node, src_image):
2799 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
2800 3ecf6786 Iustin Pop
                                   " %s on node %s" %
2801 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
2802 a8083063 Iustin Pop
      else:
2803 a8083063 Iustin Pop
        # also checked in the prereq part
2804 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
2805 3ecf6786 Iustin Pop
                                     % self.op.mode)
2806 a8083063 Iustin Pop
2807 a8083063 Iustin Pop
    if self.op.start:
2808 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
2809 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
2810 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
2811 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
2812 a8083063 Iustin Pop
2813 a8083063 Iustin Pop
2814 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
2815 a8083063 Iustin Pop
  """Connect to an instance's console.
2816 a8083063 Iustin Pop

2817 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
2818 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
2819 a8083063 Iustin Pop
  console.
2820 a8083063 Iustin Pop

2821 a8083063 Iustin Pop
  """
2822 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2823 a8083063 Iustin Pop
2824 a8083063 Iustin Pop
  def CheckPrereq(self):
2825 a8083063 Iustin Pop
    """Check prerequisites.
2826 a8083063 Iustin Pop

2827 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2828 a8083063 Iustin Pop

2829 a8083063 Iustin Pop
    """
2830 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2831 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2832 a8083063 Iustin Pop
    if instance is None:
2833 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2834 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2835 a8083063 Iustin Pop
    self.instance = instance
2836 a8083063 Iustin Pop
2837 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2838 a8083063 Iustin Pop
    """Connect to the console of an instance
2839 a8083063 Iustin Pop

2840 a8083063 Iustin Pop
    """
2841 a8083063 Iustin Pop
    instance = self.instance
2842 a8083063 Iustin Pop
    node = instance.primary_node
2843 a8083063 Iustin Pop
2844 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
2845 a8083063 Iustin Pop
    if node_insts is False:
2846 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
2847 a8083063 Iustin Pop
2848 a8083063 Iustin Pop
    if instance.name not in node_insts:
2849 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
2850 a8083063 Iustin Pop
2851 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
2852 a8083063 Iustin Pop
2853 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
2854 a8083063 Iustin Pop
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
2855 82122173 Iustin Pop
    # build ssh cmdline
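    # The resulting command is roughly:
    #   ssh -q -t <known-hosts options> <batch-mode options> <node> <console command>
    # and is returned to the caller instead of being executed here.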
2856 82122173 Iustin Pop
    argv = ["ssh", "-q", "-t"]
2857 82122173 Iustin Pop
    argv.extend(ssh.KNOWN_HOSTS_OPTS)
2858 82122173 Iustin Pop
    argv.extend(ssh.BATCH_MODE_OPTS)
2859 82122173 Iustin Pop
    argv.append(node)
2860 82122173 Iustin Pop
    argv.append(console_cmd)
2861 82122173 Iustin Pop
    return "ssh", argv
2862 a8083063 Iustin Pop
2863 a8083063 Iustin Pop
2864 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
2865 a8083063 Iustin Pop
  """Adda new mirror member to an instance's disk.
2866 a8083063 Iustin Pop

2867 a8083063 Iustin Pop
  """
2868 a8083063 Iustin Pop
  HPATH = "mirror-add"
2869 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2870 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
2871 a8083063 Iustin Pop
2872 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2873 a8083063 Iustin Pop
    """Build hooks env.
2874 a8083063 Iustin Pop

2875 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2876 a8083063 Iustin Pop

2877 a8083063 Iustin Pop
    """
2878 a8083063 Iustin Pop
    env = {
2879 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
2880 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
2881 a8083063 Iustin Pop
      }
2882 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2883 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
2884 a8083063 Iustin Pop
          self.op.remote_node,] + list(self.instance.secondary_nodes)
2885 a8083063 Iustin Pop
    return env, nl, nl
2886 a8083063 Iustin Pop
2887 a8083063 Iustin Pop
  def CheckPrereq(self):
2888 a8083063 Iustin Pop
    """Check prerequisites.
2889 a8083063 Iustin Pop

2890 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2891 a8083063 Iustin Pop

2892 a8083063 Iustin Pop
    """
2893 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2894 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2895 a8083063 Iustin Pop
    if instance is None:
2896 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2897 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2898 a8083063 Iustin Pop
    self.instance = instance
2899 a8083063 Iustin Pop
2900 a8083063 Iustin Pop
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
2901 a8083063 Iustin Pop
    if remote_node is None:
2902 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
2903 a8083063 Iustin Pop
    self.remote_node = remote_node
2904 a8083063 Iustin Pop
2905 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
2906 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
2907 3ecf6786 Iustin Pop
                                 " the instance.")
2908 a8083063 Iustin Pop
2909 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2910 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
2911 3ecf6786 Iustin Pop
                                 " remote_raid1.")
2912 a8083063 Iustin Pop
    for disk in instance.disks:
2913 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
2914 a8083063 Iustin Pop
        break
2915 a8083063 Iustin Pop
    else:
2916 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
2917 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
2918 a8083063 Iustin Pop
    if len(disk.children) > 1:
2919 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The device already has two slave"
2920 3ecf6786 Iustin Pop
                                 " devices.\n"
2921 3ecf6786 Iustin Pop
                                 "This would create a 3-disk raid1"
2922 3ecf6786 Iustin Pop
                                 " which we don't allow.")
2923 a8083063 Iustin Pop
    self.disk = disk
2924 a8083063 Iustin Pop
2925 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2926 a8083063 Iustin Pop
    """Add the mirror component
2927 a8083063 Iustin Pop

2928 a8083063 Iustin Pop
    """
2929 a8083063 Iustin Pop
    disk = self.disk
2930 a8083063 Iustin Pop
    instance = self.instance
2931 a8083063 Iustin Pop
2932 a8083063 Iustin Pop
    remote_node = self.remote_node
2933 923b1523 Iustin Pop
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
2934 923b1523 Iustin Pop
    names = _GenerateUniqueNames(self.cfg, lv_names)
2935 923b1523 Iustin Pop
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
2936 923b1523 Iustin Pop
                                     remote_node, disk.size, names)
2937 a8083063 Iustin Pop
2938 a8083063 Iustin Pop
    logger.Info("adding new mirror component on secondary")
2939 a8083063 Iustin Pop
    #HARDCODE
2940 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
2941 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
2942 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create new component on secondary"
2943 3ecf6786 Iustin Pop
                               " node %s" % remote_node)
2944 a8083063 Iustin Pop
2945 a8083063 Iustin Pop
    logger.Info("adding new mirror component on primary")
2946 a8083063 Iustin Pop
    #HARDCODE
2947 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
2948 a0c3fea1 Michael Hanselmann
                                    _GetInstanceInfoText(instance)):
2949 a8083063 Iustin Pop
      # remove secondary dev
2950 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2951 a8083063 Iustin Pop
      rpc.call_blockdev_remove(remote_node, new_drbd)
2952 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create volume on primary")
2953 a8083063 Iustin Pop
2954 a8083063 Iustin Pop
    # the device exists now
2955 a8083063 Iustin Pop
    # call the primary node to add the mirror to md
2956 a8083063 Iustin Pop
    logger.Info("adding new mirror component to md")
2957 a8083063 Iustin Pop
    if not rpc.call_blockdev_addchild(instance.primary_node,
2958 a8083063 Iustin Pop
                                           disk, new_drbd):
2959 a8083063 Iustin Pop
      logger.Error("Can't add mirror compoment to md!")
2960 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2961 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
2962 a8083063 Iustin Pop
        logger.Error("Can't rollback on secondary")
2963 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
2964 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
2965 a8083063 Iustin Pop
        logger.Error("Can't rollback on primary")
2966 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't add mirror component to md array")
2967 a8083063 Iustin Pop
2968 a8083063 Iustin Pop
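    # the new child is attached; record it in the configuration and wait for
    # the initial resync to finish before returning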
    disk.children.append(new_drbd)
2969 a8083063 Iustin Pop
2970 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2971 a8083063 Iustin Pop
2972 a8083063 Iustin Pop
    _WaitForSync(self.cfg, instance)
2973 a8083063 Iustin Pop
2974 a8083063 Iustin Pop
    return 0
2975 a8083063 Iustin Pop
2976 a8083063 Iustin Pop
2977 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
2978 a8083063 Iustin Pop
  """Remove a component from a remote_raid1 disk.
2979 a8083063 Iustin Pop

2980 a8083063 Iustin Pop
  """
2981 a8083063 Iustin Pop
  HPATH = "mirror-remove"
2982 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2983 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]
2984 a8083063 Iustin Pop
2985 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2986 a8083063 Iustin Pop
    """Build hooks env.
2987 a8083063 Iustin Pop

2988 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2989 a8083063 Iustin Pop

2990 a8083063 Iustin Pop
    """
2991 a8083063 Iustin Pop
    env = {
2992 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
2993 a8083063 Iustin Pop
      "DISK_ID": self.op.disk_id,
2994 a8083063 Iustin Pop
      "OLD_SECONDARY": self.old_secondary,
2995 a8083063 Iustin Pop
      }
2996 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2997 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
2998 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
2999 a8083063 Iustin Pop
    return env, nl, nl
3000 a8083063 Iustin Pop
3001 a8083063 Iustin Pop
  def CheckPrereq(self):
3002 a8083063 Iustin Pop
    """Check prerequisites.
3003 a8083063 Iustin Pop

3004 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3005 a8083063 Iustin Pop

3006 a8083063 Iustin Pop
    """
3007 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3008 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3009 a8083063 Iustin Pop
    if instance is None:
3010 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3011 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3012 a8083063 Iustin Pop
    self.instance = instance
3013 a8083063 Iustin Pop
3014 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3015 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3016 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3017 a8083063 Iustin Pop
    for disk in instance.disks:
3018 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
3019 a8083063 Iustin Pop
        break
3020 a8083063 Iustin Pop
    else:
3021 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3022 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
3023 a8083063 Iustin Pop
    for child in disk.children:
3024 a8083063 Iustin Pop
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
3025 a8083063 Iustin Pop
        break
3026 a8083063 Iustin Pop
    else:
3027 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find the device with this port.")
3028 a8083063 Iustin Pop
3029 a8083063 Iustin Pop
    if len(disk.children) < 2:
3030 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot remove the last component from"
3031 3ecf6786 Iustin Pop
                                 " a mirror.")
3032 a8083063 Iustin Pop
    self.disk = disk
3033 a8083063 Iustin Pop
    self.child = child
3034 a8083063 Iustin Pop
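    # the drbd child's logical_id lists its two peer nodes (plus the port
    # checked above), so the old secondary is whichever of the first two
    # entries is not the primary node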
    if self.child.logical_id[0] == instance.primary_node:
3035 a8083063 Iustin Pop
      oid = 1
3036 a8083063 Iustin Pop
    else:
3037 a8083063 Iustin Pop
      oid = 0
3038 a8083063 Iustin Pop
    self.old_secondary = self.child.logical_id[oid]
3039 a8083063 Iustin Pop
3040 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3041 a8083063 Iustin Pop
    """Remove the mirror component
3042 a8083063 Iustin Pop

3043 a8083063 Iustin Pop
    """
3044 a8083063 Iustin Pop
    instance = self.instance
3045 a8083063 Iustin Pop
    disk = self.disk
3046 a8083063 Iustin Pop
    child = self.child
3047 a8083063 Iustin Pop
    logger.Info("remove mirror component")
3048 a8083063 Iustin Pop
    self.cfg.SetDiskID(disk, instance.primary_node)
3049 a8083063 Iustin Pop
    if not rpc.call_blockdev_removechild(instance.primary_node,
3050 a8083063 Iustin Pop
                                              disk, child):
3051 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't remove child from mirror.")
3052 a8083063 Iustin Pop
3053 a8083063 Iustin Pop
    for node in child.logical_id[:2]:
3054 a8083063 Iustin Pop
      self.cfg.SetDiskID(child, node)
3055 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, child):
3056 a8083063 Iustin Pop
        logger.Error("Warning: failed to remove device from node %s,"
3057 a8083063 Iustin Pop
                     " continuing operation." % node)
3058 a8083063 Iustin Pop
3059 a8083063 Iustin Pop
    disk.children.remove(child)
3060 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3061 a8083063 Iustin Pop
3062 a8083063 Iustin Pop
3063 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3064 a8083063 Iustin Pop
  """Replace the disks of an instance.
3065 a8083063 Iustin Pop

3066 a8083063 Iustin Pop
  """
3067 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3068 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3069 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3070 a8083063 Iustin Pop
3071 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3072 a8083063 Iustin Pop
    """Build hooks env.
3073 a8083063 Iustin Pop

3074 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3075 a8083063 Iustin Pop

3076 a8083063 Iustin Pop
    """
3077 a8083063 Iustin Pop
    env = {
3078 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3079 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3080 a8083063 Iustin Pop
      }
3081 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3082 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3083 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3084 a8083063 Iustin Pop
    return env, nl, nl
3085 a8083063 Iustin Pop
3086 a8083063 Iustin Pop
  def CheckPrereq(self):
3087 a8083063 Iustin Pop
    """Check prerequisites.
3088 a8083063 Iustin Pop

3089 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3090 a8083063 Iustin Pop

3091 a8083063 Iustin Pop
    """
3092 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3093 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3094 a8083063 Iustin Pop
    if instance is None:
3095 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3096 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3097 a8083063 Iustin Pop
    self.instance = instance
3098 a8083063 Iustin Pop
3099 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3100 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3101 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3102 a8083063 Iustin Pop
3103 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3104 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3105 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3106 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3107 a8083063 Iustin Pop
3108 a8083063 Iustin Pop
    remote_node = getattr(self.op, "remote_node", None)
3109 a8083063 Iustin Pop
    if remote_node is None:
3110 a8083063 Iustin Pop
      remote_node = instance.secondary_nodes[0]
3111 a8083063 Iustin Pop
    else:
3112 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3113 a8083063 Iustin Pop
      if remote_node is None:
3114 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3115 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3116 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3117 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3118 3ecf6786 Iustin Pop
                                 " the instance.")
3119 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3120 a8083063 Iustin Pop
3121 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3122 a8083063 Iustin Pop
    """Replace the disks of an instance.
3123 a8083063 Iustin Pop

3124 a8083063 Iustin Pop
    """
3125 a8083063 Iustin Pop
    instance = self.instance
3126 a8083063 Iustin Pop
    iv_names = {}
3127 a8083063 Iustin Pop
    # start of work
3128 a8083063 Iustin Pop
    remote_node = self.op.remote_node
3129 a8083063 Iustin Pop
    cfg = self.cfg
3130 a8083063 Iustin Pop
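    # For each disk: build a new DRBD device towards remote_node, attach it
    # as an extra child of the MD array, wait for the resync, and only then
    # detach and remove the old child (see the two loops further below).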
    for dev in instance.disks:
3131 a8083063 Iustin Pop
      size = dev.size
3132 923b1523 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3133 923b1523 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3134 923b1523 Iustin Pop
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
3135 923b1523 Iustin Pop
                                       remote_node, size, names)
3136 a8083063 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
3137 a8083063 Iustin Pop
      logger.Info("adding new mirror component on secondary for %s" %
3138 a8083063 Iustin Pop
                  dev.iv_name)
3139 a8083063 Iustin Pop
      #HARDCODE
3140 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
3141 a0c3fea1 Michael Hanselmann
                                        _GetInstanceInfoText(instance)):
3142 3ecf6786 Iustin Pop
        raise errors.OpExecError("Failed to create new component on"
3143 3ecf6786 Iustin Pop
                                 " secondary node %s\n"
3144 3ecf6786 Iustin Pop
                                 "Full abort, cleanup manually!" %
3145 3ecf6786 Iustin Pop
                                 remote_node)
3146 a8083063 Iustin Pop
3147 a8083063 Iustin Pop
      logger.Info("adding new mirror component on primary")
3148 a8083063 Iustin Pop
      #HARDCODE
3149 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
3150 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3151 a8083063 Iustin Pop
        # remove secondary dev
3152 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3153 a8083063 Iustin Pop
        rpc.call_blockdev_remove(remote_node, new_drbd)
3154 a8083063 Iustin Pop
        raise errors.OpExecError("Failed to create volume on primary!\n"
3155 a8083063 Iustin Pop
                                 "Full abort, cleanup manually!!")
3156 a8083063 Iustin Pop
3157 a8083063 Iustin Pop
      # the device exists now
3158 a8083063 Iustin Pop
      # call the primary node to add the mirror to md
3159 a8083063 Iustin Pop
      logger.Info("adding new mirror component to md")
3160 a8083063 Iustin Pop
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
3161 880478f8 Iustin Pop
                                        new_drbd):
3162 a8083063 Iustin Pop
        logger.Error("Can't add mirror compoment to md!")
3163 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3164 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
3165 a8083063 Iustin Pop
          logger.Error("Can't rollback on secondary")
3166 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, instance.primary_node)
3167 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3168 a8083063 Iustin Pop
          logger.Error("Can't rollback on primary")
3169 3ecf6786 Iustin Pop
        raise errors.OpExecError("Full abort, cleanup manually!!")
3170 a8083063 Iustin Pop
3171 a8083063 Iustin Pop
      dev.children.append(new_drbd)
3172 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3173 a8083063 Iustin Pop
3174 a8083063 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3175 a8083063 Iustin Pop
    # does a combined result over all disks, so we don't check its
3176 a8083063 Iustin Pop
    # return value
3177 a8083063 Iustin Pop
    _WaitForSync(cfg, instance, unlock=True)
3178 a8083063 Iustin Pop
3179 a8083063 Iustin Pop
    # so check manually all the devices
3180 a8083063 Iustin Pop
    for name in iv_names:
3181 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3182 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3183 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3184 a8083063 Iustin Pop
      if is_degr:
3185 3ecf6786 Iustin Pop
        raise errors.OpExecError("MD device %s is degraded!" % name)
3186 a8083063 Iustin Pop
      cfg.SetDiskID(new_drbd, instance.primary_node)
3187 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
3188 a8083063 Iustin Pop
      if is_degr:
3189 3ecf6786 Iustin Pop
        raise errors.OpExecError("New drbd device %s is degraded!" % name)
3190 a8083063 Iustin Pop
3191 a8083063 Iustin Pop
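    # all the new devices are healthy, so the old children can now be
    # detached from the MD arrays and their volumes removed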
    for name in iv_names:
3192 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3193 a8083063 Iustin Pop
      logger.Info("remove mirror %s component" % name)
3194 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3195 a8083063 Iustin Pop
      if not rpc.call_blockdev_removechild(instance.primary_node,
3196 a8083063 Iustin Pop
                                                dev, child):
3197 a8083063 Iustin Pop
        logger.Error("Can't remove child from mirror, aborting"
3198 a8083063 Iustin Pop
                     " *this device cleanup*.\nYou need to cleanup manually!!")
3199 a8083063 Iustin Pop
        continue
3200 a8083063 Iustin Pop
3201 a8083063 Iustin Pop
      for node in child.logical_id[:2]:
3202 a8083063 Iustin Pop
        logger.Info("remove child device on %s" % node)
3203 a8083063 Iustin Pop
        cfg.SetDiskID(child, node)
3204 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(node, child):
3205 a8083063 Iustin Pop
          logger.Error("Warning: failed to remove device from node %s,"
3206 a8083063 Iustin Pop
                       " continuing operation." % node)
3207 a8083063 Iustin Pop
3208 a8083063 Iustin Pop
      dev.children.remove(child)
3209 a8083063 Iustin Pop
3210 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3211 a8083063 Iustin Pop
3212 a8083063 Iustin Pop
3213 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3214 a8083063 Iustin Pop
  """Query runtime instance data.
3215 a8083063 Iustin Pop

3216 a8083063 Iustin Pop
  """
3217 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3218 a8083063 Iustin Pop
3219 a8083063 Iustin Pop
  def CheckPrereq(self):
3220 a8083063 Iustin Pop
    """Check prerequisites.
3221 a8083063 Iustin Pop

3222 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3223 a8083063 Iustin Pop

3224 a8083063 Iustin Pop
    """
3225 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3226 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3227 a8083063 Iustin Pop
    if self.op.instances:
3228 a8083063 Iustin Pop
      self.wanted_instances = []
3229 a8083063 Iustin Pop
      names = self.op.instances
3230 a8083063 Iustin Pop
      for name in names:
3231 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3232 a8083063 Iustin Pop
        if instance is None:
3233 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3234 a8083063 Iustin Pop
        self.wanted_instances.append(instance)
3235 a8083063 Iustin Pop
    else:
3236 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3237 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3238 a8083063 Iustin Pop
    return
3239 a8083063 Iustin Pop
3240 a8083063 Iustin Pop
3241 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3242 a8083063 Iustin Pop
    """Compute block device status.
3243 a8083063 Iustin Pop

3244 a8083063 Iustin Pop
    """
3245 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
3246 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3247 a8083063 Iustin Pop
    if dev.dev_type == "drbd":
3248 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
3249 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
3250 a8083063 Iustin Pop
        snode = dev.logical_id[1]
3251 a8083063 Iustin Pop
      else:
3252 a8083063 Iustin Pop
        snode = dev.logical_id[0]
3253 a8083063 Iustin Pop
3254 a8083063 Iustin Pop
    if snode:
3255 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
3256 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3257 a8083063 Iustin Pop
    else:
3258 a8083063 Iustin Pop
      dev_sstatus = None
3259 a8083063 Iustin Pop
3260 a8083063 Iustin Pop
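    # recurse so the returned structure describes the whole device tree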
    if dev.children:
3261 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3262 a8083063 Iustin Pop
                      for child in dev.children]
3263 a8083063 Iustin Pop
    else:
3264 a8083063 Iustin Pop
      dev_children = []
3265 a8083063 Iustin Pop
3266 a8083063 Iustin Pop
    data = {
3267 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
3268 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
3269 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
3270 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
3271 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
3272 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
3273 a8083063 Iustin Pop
      "children": dev_children,
3274 a8083063 Iustin Pop
      }
3275 a8083063 Iustin Pop
3276 a8083063 Iustin Pop
    return data
3277 a8083063 Iustin Pop
3278 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3279 a8083063 Iustin Pop
    """Gather and return data"""
3280 a8083063 Iustin Pop
    result = {}
3281 a8083063 Iustin Pop
    for instance in self.wanted_instances:
3282 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
3283 a8083063 Iustin Pop
                                                instance.name)
3284 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
3285 a8083063 Iustin Pop
        remote_state = "up"
3286 a8083063 Iustin Pop
      else:
3287 a8083063 Iustin Pop
        remote_state = "down"
3288 a8083063 Iustin Pop
      if instance.status == "down":
3289 a8083063 Iustin Pop
        config_state = "down"
3290 a8083063 Iustin Pop
      else:
3291 a8083063 Iustin Pop
        config_state = "up"
3292 a8083063 Iustin Pop
3293 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
3294 a8083063 Iustin Pop
               for device in instance.disks]
3295 a8083063 Iustin Pop
3296 a8083063 Iustin Pop
      idict = {
3297 a8083063 Iustin Pop
        "name": instance.name,
3298 a8083063 Iustin Pop
        "config_state": config_state,
3299 a8083063 Iustin Pop
        "run_state": remote_state,
3300 a8083063 Iustin Pop
        "pnode": instance.primary_node,
3301 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
3302 a8083063 Iustin Pop
        "os": instance.os,
3303 a8083063 Iustin Pop
        "memory": instance.memory,
3304 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
3305 a8083063 Iustin Pop
        "disks": disks,
3306 a8083063 Iustin Pop
        }
3307 a8083063 Iustin Pop
3308 a8083063 Iustin Pop
      result[instance.name] = idict
3309 a8083063 Iustin Pop
3310 a8083063 Iustin Pop
    return result
3311 a8083063 Iustin Pop
3312 a8083063 Iustin Pop
3313 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
3314 a8083063 Iustin Pop
  """Modifies an instances's parameters.
3315 a8083063 Iustin Pop

3316 a8083063 Iustin Pop
  """
3317 a8083063 Iustin Pop
  HPATH = "instance-modify"
3318 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3319 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3320 a8083063 Iustin Pop
3321 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3322 a8083063 Iustin Pop
    """Build hooks env.
3323 a8083063 Iustin Pop

3324 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
3325 a8083063 Iustin Pop

3326 a8083063 Iustin Pop
    """
3327 396e1b78 Michael Hanselmann
    args = dict()
3328 a8083063 Iustin Pop
    if self.mem:
3329 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
3330 a8083063 Iustin Pop
    if self.vcpus:
3331 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
3332 396e1b78 Michael Hanselmann
    if self.do_ip or self.do_bridge:
3333 396e1b78 Michael Hanselmann
      if self.do_ip:
3334 396e1b78 Michael Hanselmann
        ip = self.ip
3335 396e1b78 Michael Hanselmann
      else:
3336 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
3337 396e1b78 Michael Hanselmann
      if self.bridge:
3338 396e1b78 Michael Hanselmann
        bridge = self.bridge
3339 396e1b78 Michael Hanselmann
      else:
3340 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
3341 396e1b78 Michael Hanselmann
      args['nics'] = [(ip, bridge)]
3342 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
3343 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3344 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3345 a8083063 Iustin Pop
    return env, nl, nl
3346 a8083063 Iustin Pop
3347 a8083063 Iustin Pop
  def CheckPrereq(self):
3348 a8083063 Iustin Pop
    """Check prerequisites.
3349 a8083063 Iustin Pop

3350 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
3351 a8083063 Iustin Pop

3352 a8083063 Iustin Pop
    """
3353 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
3354 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
3355 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
3356 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
3357 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3358 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
3359 a8083063 Iustin Pop
    if self.mem is not None:
3360 a8083063 Iustin Pop
      try:
3361 a8083063 Iustin Pop
        self.mem = int(self.mem)
3362 a8083063 Iustin Pop
      except ValueError, err:
3363 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
3364 a8083063 Iustin Pop
    if self.vcpus is not None:
3365 a8083063 Iustin Pop
      try:
3366 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
3367 a8083063 Iustin Pop
      except ValueError, err:
3368 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
3369 a8083063 Iustin Pop
    if self.ip is not None:
3370 a8083063 Iustin Pop
      self.do_ip = True
3371 a8083063 Iustin Pop
      if self.ip.lower() == "none":
3372 a8083063 Iustin Pop
        self.ip = None
3373 a8083063 Iustin Pop
      else:
3374 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
3375 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
3376 a8083063 Iustin Pop
    else:
3377 a8083063 Iustin Pop
      self.do_ip = False
3378 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
3379 a8083063 Iustin Pop
3380 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3381 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3382 a8083063 Iustin Pop
    if instance is None:
3383 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
3384 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3385 a8083063 Iustin Pop
    self.op.instance_name = instance.name
3386 a8083063 Iustin Pop
    self.instance = instance
3387 a8083063 Iustin Pop
    return
3388 a8083063 Iustin Pop
3389 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3390 a8083063 Iustin Pop
    """Modifies an instance.
3391 a8083063 Iustin Pop

3392 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
3393 a8083063 Iustin Pop
    """
3394 a8083063 Iustin Pop
    result = []
3395 a8083063 Iustin Pop
    instance = self.instance
3396 a8083063 Iustin Pop
    if self.mem:
3397 a8083063 Iustin Pop
      instance.memory = self.mem
3398 a8083063 Iustin Pop
      result.append(("mem", self.mem))
3399 a8083063 Iustin Pop
    if self.vcpus:
3400 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
3401 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
3402 a8083063 Iustin Pop
    if self.do_ip:
3403 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
3404 a8083063 Iustin Pop
      result.append(("ip", self.ip))
3405 a8083063 Iustin Pop
    if self.bridge:
3406 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
3407 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
3408 a8083063 Iustin Pop
3409 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3410 a8083063 Iustin Pop
3411 a8083063 Iustin Pop
    return result
3412 a8083063 Iustin Pop
3413 a8083063 Iustin Pop
3414 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
3415 a8083063 Iustin Pop
  """Query the exports list
3416 a8083063 Iustin Pop

3417 a8083063 Iustin Pop
  """
3418 a8083063 Iustin Pop
  _OP_REQP = []
3419 a8083063 Iustin Pop
3420 a8083063 Iustin Pop
  def CheckPrereq(self):
3421 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
3422 a8083063 Iustin Pop

3423 a8083063 Iustin Pop
    """
3424 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
3425 a8083063 Iustin Pop
3426 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3427 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
3428 a8083063 Iustin Pop

3429 a8083063 Iustin Pop
    Returns:
3430 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
3431 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
3432 a8083063 Iustin Pop
      that node.
3433 a8083063 Iustin Pop

3434 a8083063 Iustin Pop
    """
3435 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
3436 a8083063 Iustin Pop
3437 a8083063 Iustin Pop
3438 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
3439 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
3440 a8083063 Iustin Pop

3441 a8083063 Iustin Pop
  """
3442 a8083063 Iustin Pop
  HPATH = "instance-export"
3443 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3444 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3445 a8083063 Iustin Pop
3446 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3447 a8083063 Iustin Pop
    """Build hooks env.
3448 a8083063 Iustin Pop

3449 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
3450 a8083063 Iustin Pop

3451 a8083063 Iustin Pop
    """
3452 a8083063 Iustin Pop
    env = {
3453 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
3454 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
3455 a8083063 Iustin Pop
      }
3456 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3457 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3458 a8083063 Iustin Pop
          self.op.target_node]
3459 a8083063 Iustin Pop
    return env, nl, nl
3460 a8083063 Iustin Pop
3461 a8083063 Iustin Pop
  def CheckPrereq(self):
3462 a8083063 Iustin Pop
    """Check prerequisites.
3463 a8083063 Iustin Pop

3464 a8083063 Iustin Pop
    This checks that the instance name is a valid one.
3465 a8083063 Iustin Pop

3466 a8083063 Iustin Pop
    """
3467 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
3468 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
3469 a8083063 Iustin Pop
    if self.instance is None:
3470 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
3471 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3472 a8083063 Iustin Pop
3473 a8083063 Iustin Pop
    # node verification
3474 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
3475 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
3476 a8083063 Iustin Pop
3477 a8083063 Iustin Pop
    if self.dst_node is None:
3478 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
3479 3ecf6786 Iustin Pop
                                 self.op.target_node)
3480 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
3481 a8083063 Iustin Pop
3482 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3483 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
3484 a8083063 Iustin Pop

3485 a8083063 Iustin Pop
    """
3486 a8083063 Iustin Pop
    instance = self.instance
3487 a8083063 Iustin Pop
    dst_node = self.dst_node
3488 a8083063 Iustin Pop
    src_node = instance.primary_node
3489 a8083063 Iustin Pop
    # shutdown the instance, unless requested not to do so
3490 a8083063 Iustin Pop
    if self.op.shutdown:
3491 a8083063 Iustin Pop
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
3492 a8083063 Iustin Pop
      self.processor.ChainOpCode(op, feedback_fn)
3493 a8083063 Iustin Pop
3494 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
3495 a8083063 Iustin Pop
3496 a8083063 Iustin Pop
    snap_disks = []
3497 a8083063 Iustin Pop
3498 a8083063 Iustin Pop
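    # only the disk named "sda" is snapshotted; each snapshot is exported to
    # the target node and then removed from the source node further down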
    try:
3499 a8083063 Iustin Pop
      for disk in instance.disks:
3500 a8083063 Iustin Pop
        if disk.iv_name == "sda":
3501 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
3502 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
3503 a8083063 Iustin Pop
3504 a8083063 Iustin Pop
          if not new_dev_name:
3505 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
3506 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
3507 a8083063 Iustin Pop
          else:
3508 a8083063 Iustin Pop
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
3509 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
3510 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
3511 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
3512 a8083063 Iustin Pop
            snap_disks.append(new_dev)
3513 a8083063 Iustin Pop
3514 a8083063 Iustin Pop
    finally:
3515 a8083063 Iustin Pop
      if self.op.shutdown:
3516 a8083063 Iustin Pop
        op = opcodes.OpStartupInstance(instance_name=instance.name,
3517 a8083063 Iustin Pop
                                       force=False)
3518 a8083063 Iustin Pop
        self.processor.ChainOpCode(op, feedback_fn)
3519 a8083063 Iustin Pop
3520 a8083063 Iustin Pop
    # TODO: check for size
3521 a8083063 Iustin Pop
3522 a8083063 Iustin Pop
    for dev in snap_disks:
3523 a8083063 Iustin Pop
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
3524 a8083063 Iustin Pop
                                           instance):
3525 a8083063 Iustin Pop
        logger.Error("could not export block device %s from node"
3526 a8083063 Iustin Pop
                     " %s to node %s" %
3527 a8083063 Iustin Pop
                     (dev.logical_id[1], src_node, dst_node.name))
3528 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
3529 a8083063 Iustin Pop
        logger.Error("could not remove snapshot block device %s from"
3530 a8083063 Iustin Pop
                     " node %s" % (dev.logical_id[1], src_node))
3531 a8083063 Iustin Pop
3532 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
3533 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
3534 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
3535 a8083063 Iustin Pop
3536 a8083063 Iustin Pop
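    # finally, remove any older export of this instance that is still
    # present on the other nodes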
    nodelist = self.cfg.GetNodeList()
3537 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
3538 a8083063 Iustin Pop
3539 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
3540 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
3541 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
3542 a8083063 Iustin Pop
    if nodelist:
3543 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
3544 a8083063 Iustin Pop
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
3545 a8083063 Iustin Pop
      for node in exportlist:
3546 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
3547 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
3548 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
3549 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
3550 5c947f38 Iustin Pop
3551 5c947f38 Iustin Pop
3552 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
3553 5c947f38 Iustin Pop
  """Generic tags LU.
3554 5c947f38 Iustin Pop

3555 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
3556 5c947f38 Iustin Pop

3557 5c947f38 Iustin Pop
  """
3558 5c947f38 Iustin Pop
  def CheckPrereq(self):
3559 5c947f38 Iustin Pop
    """Check prerequisites.
3560 5c947f38 Iustin Pop

3561 5c947f38 Iustin Pop
    """
3562 5c947f38 Iustin Pop
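    # resolve the object (cluster, node or instance) that the tag
    # operations will act on and remember it as self.target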
    if self.op.kind == constants.TAG_CLUSTER:
3563 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
3564 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
3565 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
3566 5c947f38 Iustin Pop
      if name is None:
3567 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
3568 3ecf6786 Iustin Pop
                                   (self.op.name,))
3569 5c947f38 Iustin Pop
      self.op.name = name
3570 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
3571 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
3572 5c947f38 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
3573 5c947f38 Iustin Pop
      if name is None:
3574 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
3575 3ecf6786 Iustin Pop
                                   (self.op.name,))
3576 5c947f38 Iustin Pop
      self.op.name = name
3577 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
3578 5c947f38 Iustin Pop
    else:
3579 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
3580 3ecf6786 Iustin Pop
                                 str(self.op.kind))
3581 5c947f38 Iustin Pop
3582 5c947f38 Iustin Pop
3583 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
3584 5c947f38 Iustin Pop
  """Returns the tags of a given object.
3585 5c947f38 Iustin Pop

3586 5c947f38 Iustin Pop
  """
3587 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
3588 5c947f38 Iustin Pop
3589 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3590 5c947f38 Iustin Pop
    """Returns the tag list.
3591 5c947f38 Iustin Pop

3592 5c947f38 Iustin Pop
    """
3593 5c947f38 Iustin Pop
    return self.target.GetTags()
3594 5c947f38 Iustin Pop
3595 5c947f38 Iustin Pop
3596 5c947f38 Iustin Pop
class LUAddTag(TagsLU):
3597 5c947f38 Iustin Pop
  """Sets a tag on a given object.
3598 5c947f38 Iustin Pop

3599 5c947f38 Iustin Pop
  """
3600 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name", "tag"]
3601 5c947f38 Iustin Pop
3602 5c947f38 Iustin Pop
  def CheckPrereq(self):
3603 5c947f38 Iustin Pop
    """Check prerequisites.
3604 5c947f38 Iustin Pop

3605 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
3606 5c947f38 Iustin Pop

3607 5c947f38 Iustin Pop
    """
3608 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3609 5c947f38 Iustin Pop
    objects.TaggableObject.ValidateTag(self.op.tag)
3610 5c947f38 Iustin Pop
3611 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3612 5c947f38 Iustin Pop
    """Sets the tag.
3613 5c947f38 Iustin Pop

3614 5c947f38 Iustin Pop
    """
3615 5c947f38 Iustin Pop
    try:
3616 5c947f38 Iustin Pop
      self.target.AddTag(self.op.tag)
3617 5c947f38 Iustin Pop
    except errors.TagError, err:
3618 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
3619 5c947f38 Iustin Pop
    try:
3620 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3621 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3622 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3623 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3624 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
3625 5c947f38 Iustin Pop
3626 5c947f38 Iustin Pop
3627 5c947f38 Iustin Pop
class LUDelTag(TagsLU):
3628 5c947f38 Iustin Pop
  """Delete a tag from a given object.
3629 5c947f38 Iustin Pop

3630 5c947f38 Iustin Pop
  """
3631 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name", "tag"]
3632 5c947f38 Iustin Pop
3633 5c947f38 Iustin Pop
  def CheckPrereq(self):
3634 5c947f38 Iustin Pop
    """Check prerequisites.
3635 5c947f38 Iustin Pop

3636 5c947f38 Iustin Pop
    This checks that we have the given tag.
3637 5c947f38 Iustin Pop

3638 5c947f38 Iustin Pop
    """
3639 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3640 5c947f38 Iustin Pop
    objects.TaggableObject.ValidateTag(self.op.tag)
3641 5c947f38 Iustin Pop
    if self.op.tag not in self.target.GetTags():
3642 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Tag not found")
3643 5c947f38 Iustin Pop
3644 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3645 5c947f38 Iustin Pop
    """Remove the tag from the object.
3646 5c947f38 Iustin Pop

3647 5c947f38 Iustin Pop
    """
3648 5c947f38 Iustin Pop
    self.target.RemoveTag(self.op.tag)
3649 5c947f38 Iustin Pop
    try:
3650 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3651 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3652 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3653 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3654 3ecf6786 Iustin Pop
                                " aborted. Please retry.")