root / lib / cmdlib.py @ 3312b702

#!/usr/bin/python
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import socket
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf

class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != socket.gethostname():
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have the 'GANETI_' prefix, as this
    will be handled in the hooks runner. Also note that additional keys
    will be added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in them,
    as it will be added by the hooks runner in case this LU requires a
    cluster to run on (otherwise we don't have a node list). If there
    are no nodes, an empty list (and not None) should be returned.

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError


class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    return


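# Illustrative sketch (not part of the original module): a minimal logical
# unit following the contract described in the LogicalUnit docstring above.
# The opcode field 'node_name' and this LU itself are hypothetical, for
# demonstration only; the lookup pattern mirrors existing LUs in this file.
class _ExampleNoopLU(NoHooksLU):
  """Example-only LU: expands a node name and merely reports it."""
  _OP_REQP = ["node_name"]

  def CheckPrereq(self):
    # Canonicalise the opcode parameters; no cluster changes are allowed here.
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
    self.op.node_name = node.name

  def Exec(self, feedback_fn):
    # The actual work would go here; this example only reports the node.
    feedback_fn("Would operate on node %s" % self.op.node_name)

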
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded nodes.

  Args:
    nodes: List of nodes (strings), or an empty list for all nodes

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted = []

    for name in nodes:
      node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      wanted.append(node)

  else:
    wanted = [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]
  return wanted


def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instances.

  Args:
    instances: List of instances (strings), or an empty list for all instances

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if instances:
    wanted = []

    for name in instances:
      instance = lu.cfg.GetInstanceInfo(lu.cfg.ExpandInstanceName(name))
      if instance is None:
        raise errors.OpPrereqError("No such instance name '%s'" % name)
      wanted.append(instance)

  else:
    wanted = [lu.cfg.GetInstanceInfo(name)
              for name in lu.cfg.GetInstanceList()]
  return wanted


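# Illustrative sketch (not part of the original module): how an LU would
# typically use the two helpers above from its CheckPrereq; the instance
# name below is made up.
def _ExampleExpandWanted(lu):
  """Example-only helper demonstrating _GetWantedNodes/_GetWantedInstances."""
  # An empty list selects everything; a non-list (e.g. None) raises
  # errors.OpPrereqError.
  all_nodes = _GetWantedNodes(lu, [])
  # Expanding a given name raises errors.OpPrereqError if it is unknown.
  some_instances = _GetWantedInstances(lu, ["inst1.example.com"])
  return ([node.name for node in all_nodes],
          [inst.name for inst in some_instances])

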
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: Fields selected by the caller

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))


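# Illustrative usage (not part of the original module); the field names
# below are made up and do not correspond to any particular query.
def _ExampleCheckFields():
  """Example-only helper demonstrating _CheckOutputFields."""
  # Every selected field is either static or dynamic, so this passes silently.
  _CheckOutputFields(static=["name", "pip"],
                     dynamic=["dtotal", "dfree"],
                     selected=["name", "dfree"])
  # Selecting an unknown field (e.g. "bogus") would raise
  # errors.OpPrereqError naming the offending fields.

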
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                          memory, vcpus, nics):
  """Builds instance related env variables for hooks from single variables.

  Args:
    secondary_nodes: List of secondary nodes as strings
  """
  env = {
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MEMORY": memory,
    "INSTANCE_VCPUS": vcpus,
  }

  if nics:
    nic_count = len(nics)
    for idx, (ip, bridge) in enumerate(nics):
      if ip is None:
        ip = ""
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  return env


def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override
  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)


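# Illustrative sketch (not part of the original module): the environment
# produced by _BuildInstanceHookEnv for a hypothetical one-NIC instance;
# all values below are made up.
def _ExampleInstanceHookEnv():
  """Example-only helper demonstrating _BuildInstanceHookEnv output."""
  env = _BuildInstanceHookEnv("inst1.example.com", "node1.example.com",
                              ["node2.example.com"], "debian-etch", "up",
                              128, 1, [("198.51.100.5", "xen-br0")])
  # env now maps, among others, INSTANCE_NAME -> "inst1.example.com",
  # INSTANCE_SECONDARIES -> "node2.example.com", INSTANCE_NIC_COUNT -> 1,
  # INSTANCE_NIC0_IP -> "198.51.100.5" and INSTANCE_NIC0_BRIDGE -> "xen-br0";
  # per the LogicalUnit docstring, the hooks runner adds the GANETI_ prefix.
  return env

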
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()


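# Illustrative note (not part of the original module): the format of the
# /etc/hosts entry maintained above, shown for a hypothetical node.
def _ExampleHostsLine():
  """Example-only helper showing the /etc/hosts line written above."""
  ip, fullnode = "198.51.100.11", "node1.example.com"
  # Same format string as in _UpdateEtcHosts: "<ip>\t<fqdn> <short name>".
  return '%s\t%s %s\n' % (ip, fullnode, fullnode.split(".", 1)[0])

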
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()
    logger.Debug('read %s' % (repr(rawline),))

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    parts = line.split(' ')
    fields = parts[0].split(',')
    key = parts[2]

    haveall = True
    havesome = False
    for spec in [ ip, fullnode ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
    if haveall and key == pubkey:
      inthere = True
      save_lines.append(rawline)
      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
      continue

    if havesome and (not haveall or key != pubkey):
      removed = True
      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()


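# Illustrative note (not part of the original module): the corresponding
# known_hosts entry added above, again for a hypothetical node and key.
def _ExampleKnownHostsLine():
  """Example-only helper showing the known_hosts line written above."""
  fullnode, ip, pubkey = "node1.example.com", "198.51.100.11", "AAAA...fake"
  # Same format string as in _UpdateKnownHosts: "<fqdn>,<ip> ssh-rsa <key>".
  return '%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey)

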
def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None


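# Illustrative usage (not part of the original module); the volume group
# names and sizes (in MiB) are made up.
def _ExampleVGCheck():
  """Example-only helper demonstrating _HasValidVG return values."""
  vglist = {"xenvg": 409600}
  assert _HasValidVG(vglist, "xenvg") is None        # present and big enough
  assert _HasValidVG(vglist, "missing") is not None  # absent -> error message
  assert _HasValidVG({"xenvg": 1024}, "xenvg") is not None  # below 20480 MiB

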
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  if os.path.exists('/root/.ssh/id_dsa'):
    utils.CreateBackup('/root/.ssh/id_dsa')
  if os.path.exists('/root/.ssh/id_dsa.pub'):
    utils.CreateBackup('/root/.ssh/id_dsa.pub')

  utils.RemoveFile('/root/.ssh/id_dsa')
  utils.RemoveFile('/root/.ssh/id_dsa.pub')

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", "/root/.ssh/id_dsa",
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open('/root/.ssh/id_dsa.pub', 'r')
  try:
    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


class LUInitCluster(LogicalUnit):
533 a8083063 Iustin Pop
  """Initialise the cluster.
534 a8083063 Iustin Pop

535 a8083063 Iustin Pop
  """
536 a8083063 Iustin Pop
  HPATH = "cluster-init"
537 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
538 a8083063 Iustin Pop
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
539 880478f8 Iustin Pop
              "def_bridge", "master_netdev"]
540 a8083063 Iustin Pop
  REQ_CLUSTER = False
541 a8083063 Iustin Pop
542 a8083063 Iustin Pop
  def BuildHooksEnv(self):
543 a8083063 Iustin Pop
    """Build hooks env.
544 a8083063 Iustin Pop

545 a8083063 Iustin Pop
    Notes: Since we don't require a cluster, we must manually add
546 a8083063 Iustin Pop
    ourselves in the post-run node list.
547 a8083063 Iustin Pop

548 a8083063 Iustin Pop
    """
549 396e1b78 Michael Hanselmann
    env = {
550 396e1b78 Michael Hanselmann
      "CLUSTER": self.op.cluster_name,
551 396e1b78 Michael Hanselmann
      "MASTER": self.hostname['hostname_full'],
552 396e1b78 Michael Hanselmann
      }
553 a8083063 Iustin Pop
    return env, [], [self.hostname['hostname_full']]
554 a8083063 Iustin Pop
555 a8083063 Iustin Pop
  def CheckPrereq(self):
556 a8083063 Iustin Pop
    """Verify that the passed name is a valid one.
557 a8083063 Iustin Pop

558 a8083063 Iustin Pop
    """
559 a8083063 Iustin Pop
    if config.ConfigWriter.IsCluster():
560 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cluster is already initialised")
561 a8083063 Iustin Pop
562 a8083063 Iustin Pop
    hostname_local = socket.gethostname()
563 a8083063 Iustin Pop
    self.hostname = hostname = utils.LookupHostname(hostname_local)
564 a8083063 Iustin Pop
    if not hostname:
565 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
566 3ecf6786 Iustin Pop
                                 hostname_local)
567 a8083063 Iustin Pop
568 ff98055b Iustin Pop
    if hostname["hostname_full"] != hostname_local:
569 ff98055b Iustin Pop
      raise errors.OpPrereqError("My own hostname (%s) does not match the"
570 ff98055b Iustin Pop
                                 " resolver (%s): probably not using FQDN"
571 ff98055b Iustin Pop
                                 " for hostname." %
572 ff98055b Iustin Pop
                                 (hostname_local, hostname["hostname_full"]))
573 ff98055b Iustin Pop
574 130e907e Iustin Pop
    if hostname["ip"].startswith("127."):
575 130e907e Iustin Pop
      raise errors.OpPrereqError("This host's IP resolves to the private"
576 130e907e Iustin Pop
                                 " range (%s). Please fix DNS or /etc/hosts." %
577 130e907e Iustin Pop
                                 (hostname["ip"],))
578 130e907e Iustin Pop
579 a8083063 Iustin Pop
    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
580 a8083063 Iustin Pop
    if not clustername:
581 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
582 3ecf6786 Iustin Pop
                                 % self.op.cluster_name)
583 a8083063 Iustin Pop
584 a8083063 Iustin Pop
    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
585 a8083063 Iustin Pop
    if result.failed:
586 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
587 3ecf6786 Iustin Pop
                                 " to %s,\nbut this ip address does not"
588 3ecf6786 Iustin Pop
                                 " belong to this host."
589 3ecf6786 Iustin Pop
                                 " Aborting." % hostname['ip'])
590 a8083063 Iustin Pop
591 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
592 a8083063 Iustin Pop
    if secondary_ip and not utils.IsValidIP(secondary_ip):
593 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary ip given")
594 a8083063 Iustin Pop
    if secondary_ip and secondary_ip != hostname['ip']:
595 a8083063 Iustin Pop
      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
596 a8083063 Iustin Pop
      if result.failed:
597 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("You gave %s as secondary IP,\n"
598 3ecf6786 Iustin Pop
                                   "but it does not belong to this host." %
599 3ecf6786 Iustin Pop
                                   secondary_ip)
600 a8083063 Iustin Pop
    self.secondary_ip = secondary_ip
601 a8083063 Iustin Pop
602 a8083063 Iustin Pop
    # checks presence of the volume group given
603 a8083063 Iustin Pop
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
604 a8083063 Iustin Pop
605 a8083063 Iustin Pop
    if vgstatus:
606 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Error: %s" % vgstatus)
607 a8083063 Iustin Pop
608 a8083063 Iustin Pop
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
609 a8083063 Iustin Pop
                    self.op.mac_prefix):
610 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
611 3ecf6786 Iustin Pop
                                 self.op.mac_prefix)
612 a8083063 Iustin Pop
613 a8083063 Iustin Pop
    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
614 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
615 3ecf6786 Iustin Pop
                                 self.op.hypervisor_type)
616 a8083063 Iustin Pop
617 880478f8 Iustin Pop
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
618 880478f8 Iustin Pop
    if result.failed:
619 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
620 8925faaa Iustin Pop
                                 (self.op.master_netdev,
621 8925faaa Iustin Pop
                                  result.output.strip()))
622 880478f8 Iustin Pop
623 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
624 a8083063 Iustin Pop
    """Initialize the cluster.
625 a8083063 Iustin Pop

626 a8083063 Iustin Pop
    """
627 a8083063 Iustin Pop
    clustername = self.clustername
628 a8083063 Iustin Pop
    hostname = self.hostname
629 a8083063 Iustin Pop
630 a8083063 Iustin Pop
    # set up the simple store
631 a8083063 Iustin Pop
    ss = ssconf.SimpleStore()
632 a8083063 Iustin Pop
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
633 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
634 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
635 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
636 5fcdc80d Iustin Pop
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname'])
637 a8083063 Iustin Pop
638 a8083063 Iustin Pop
    # set up the inter-node password and certificate
639 a8083063 Iustin Pop
    _InitGanetiServerSetup(ss)
640 a8083063 Iustin Pop
641 a8083063 Iustin Pop
    # start the master ip
642 a8083063 Iustin Pop
    rpc.call_node_start_master(hostname['hostname_full'])
643 a8083063 Iustin Pop
644 a8083063 Iustin Pop
    # set up ssh config and /etc/hosts
645 a8083063 Iustin Pop
    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
646 a8083063 Iustin Pop
    try:
647 a8083063 Iustin Pop
      sshline = f.read()
648 a8083063 Iustin Pop
    finally:
649 a8083063 Iustin Pop
      f.close()
650 a8083063 Iustin Pop
    sshkey = sshline.split(" ")[1]
651 a8083063 Iustin Pop
652 a8083063 Iustin Pop
    _UpdateEtcHosts(hostname['hostname_full'],
653 a8083063 Iustin Pop
                    hostname['ip'],
654 a8083063 Iustin Pop
                    )
655 a8083063 Iustin Pop
656 a8083063 Iustin Pop
    _UpdateKnownHosts(hostname['hostname_full'],
657 a8083063 Iustin Pop
                      hostname['ip'],
658 a8083063 Iustin Pop
                      sshkey,
659 a8083063 Iustin Pop
                      )
660 a8083063 Iustin Pop
661 a8083063 Iustin Pop
    _InitSSHSetup(hostname['hostname'])
662 a8083063 Iustin Pop
663 a8083063 Iustin Pop
    # init of cluster config file
664 a8083063 Iustin Pop
    cfgw = config.ConfigWriter()
665 a8083063 Iustin Pop
    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
666 5fcdc80d Iustin Pop
                    sshkey, self.op.mac_prefix,
667 a8083063 Iustin Pop
                    self.op.vg_name, self.op.def_bridge)
668 a8083063 Iustin Pop
669 a8083063 Iustin Pop
670 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
671 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
672 a8083063 Iustin Pop

673 a8083063 Iustin Pop
  """
674 a8083063 Iustin Pop
  _OP_REQP = []
675 a8083063 Iustin Pop
676 a8083063 Iustin Pop
  def CheckPrereq(self):
677 a8083063 Iustin Pop
    """Check prerequisites.
678 a8083063 Iustin Pop

679 a8083063 Iustin Pop
    This checks whether the cluster is empty.
680 a8083063 Iustin Pop

681 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
682 a8083063 Iustin Pop

683 a8083063 Iustin Pop
    """
684 880478f8 Iustin Pop
    master = self.sstore.GetMasterNode()
685 a8083063 Iustin Pop
686 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
687 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
688 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
689 3ecf6786 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1))
690 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
691 db915bd1 Michael Hanselmann
    if instancelist:
692 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
693 3ecf6786 Iustin Pop
                                 " this cluster." % len(instancelist))
694 a8083063 Iustin Pop
695 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
696 a8083063 Iustin Pop
    """Destroys the cluster.
697 a8083063 Iustin Pop

698 a8083063 Iustin Pop
    """
699 a8083063 Iustin Pop
    utils.CreateBackup('/root/.ssh/id_dsa')
700 a8083063 Iustin Pop
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
701 880478f8 Iustin Pop
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
702 a8083063 Iustin Pop
703 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existence and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existence and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return not bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various tests on nodes.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.sstore.GetMasterNode()
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)


def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disks to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      perc_done, est_time, is_degraded = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


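# Illustrative sketch (not part of the original module): how one entry of the
# mirror status list polled in _WaitForSync is interpreted; the numbers below
# are made up.
def _ExampleMirrorStatus():
  """Example-only helper showing a (perc_done, est_time, is_degraded) tuple."""
  perc_done, est_time, is_degraded = (87.5, 42, True)
  # A non-None percentage means the device is still syncing; None means the
  # device has reached its final state.  est_time is in seconds (or None).
  still_syncing = perc_done is not None
  return still_syncing, est_time, is_degraded

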
1011 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
1012 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
1013 a8083063 Iustin Pop

1014 a8083063 Iustin Pop
  """
1015 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
1016 a8083063 Iustin Pop
1017 a8083063 Iustin Pop
  result = True
1018 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
1019 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
1020 a8083063 Iustin Pop
    if not rstats:
1021 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
1022 a8083063 Iustin Pop
      result = False
1023 a8083063 Iustin Pop
    else:
1024 a8083063 Iustin Pop
      result = result and (not rstats[5])
1025 a8083063 Iustin Pop
  if dev.children:
1026 a8083063 Iustin Pop
    for child in dev.children:
1027 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
1028 a8083063 Iustin Pop
1029 a8083063 Iustin Pop
  return result
1030 a8083063 Iustin Pop
1031 a8083063 Iustin Pop
1032 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
1033 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
1034 a8083063 Iustin Pop

1035 a8083063 Iustin Pop
  """
1036 a8083063 Iustin Pop
  _OP_REQP = []
1037 a8083063 Iustin Pop
1038 a8083063 Iustin Pop
  def CheckPrereq(self):
1039 a8083063 Iustin Pop
    """Check prerequisites.
1040 a8083063 Iustin Pop

1041 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
1042 a8083063 Iustin Pop

1043 a8083063 Iustin Pop
    """
1044 a8083063 Iustin Pop
    return
1045 a8083063 Iustin Pop
1046 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1047 a8083063 Iustin Pop
    """Compute the list of OSes.
1048 a8083063 Iustin Pop

1049 a8083063 Iustin Pop
    """
1050 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
1051 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
1052 a8083063 Iustin Pop
    if node_data == False:
1053 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't gather the list of OSes")
1054 a8083063 Iustin Pop
    return node_data
1055 a8083063 Iustin Pop
1056 a8083063 Iustin Pop
1057 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
1058 a8083063 Iustin Pop
  """Logical unit for removing a node.
1059 a8083063 Iustin Pop

1060 a8083063 Iustin Pop
  """
1061 a8083063 Iustin Pop
  HPATH = "node-remove"
1062 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1063 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1064 a8083063 Iustin Pop
1065 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1066 a8083063 Iustin Pop
    """Build hooks env.
1067 a8083063 Iustin Pop

1068 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1069 a8083063 Iustin Pop
    node would not allow itself to run.
1070 a8083063 Iustin Pop

1071 a8083063 Iustin Pop
    """
1072 396e1b78 Michael Hanselmann
    env = {
1073 396e1b78 Michael Hanselmann
      "NODE_NAME": self.op.node_name,
1074 396e1b78 Michael Hanselmann
      }
1075 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1076 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1077 396e1b78 Michael Hanselmann
    return env, all_nodes, all_nodes
1078 a8083063 Iustin Pop
1079 a8083063 Iustin Pop
  def CheckPrereq(self):
1080 a8083063 Iustin Pop
    """Check prerequisites.
1081 a8083063 Iustin Pop

1082 a8083063 Iustin Pop
    This checks:
1083 a8083063 Iustin Pop
     - the node exists in the configuration
1084 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1085 a8083063 Iustin Pop
     - it's not the master
1086 a8083063 Iustin Pop

1087 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1088 a8083063 Iustin Pop

1089 a8083063 Iustin Pop
    """
1090 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1091 a8083063 Iustin Pop
    if node is None:
1092 a02bc76e Iustin Pop
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)
1093 a8083063 Iustin Pop
1094 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1095 a8083063 Iustin Pop
1096 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1097 a8083063 Iustin Pop
    if node.name == masternode:
1098 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node is the master node,"
1099 3ecf6786 Iustin Pop
                                 " you need to failover first.")
1100 a8083063 Iustin Pop
1101 a8083063 Iustin Pop
    for instance_name in instance_list:
1102 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1103 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1104 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s still running on the node,"
1105 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1106 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1107 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
1108 3ecf6786 Iustin Pop
                                   " please remove first." % instance_name)
1109 a8083063 Iustin Pop
    self.op.node_name = node.name
1110 a8083063 Iustin Pop
    self.node = node
1111 a8083063 Iustin Pop
1112 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1113 a8083063 Iustin Pop
    """Removes the node from the cluster.
1114 a8083063 Iustin Pop

1115 a8083063 Iustin Pop
    """
1116 a8083063 Iustin Pop
    node = self.node
1117 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1118 a8083063 Iustin Pop
                node.name)
1119 a8083063 Iustin Pop
1120 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1121 a8083063 Iustin Pop
1122 a8083063 Iustin Pop
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1123 a8083063 Iustin Pop
1124 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1125 a8083063 Iustin Pop
1126 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1127 a8083063 Iustin Pop
1128 a8083063 Iustin Pop
1129 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1130 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1131 a8083063 Iustin Pop

1132 a8083063 Iustin Pop
  """
1133 a8083063 Iustin Pop
  _OP_REQP = ["output_fields"]
1134 a8083063 Iustin Pop
1135 a8083063 Iustin Pop
  def CheckPrereq(self):
1136 a8083063 Iustin Pop
    """Check prerequisites.
1137 a8083063 Iustin Pop

1138 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1139 a8083063 Iustin Pop

1140 a8083063 Iustin Pop
    """
1141 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["dtotal", "dfree",
1142 a8083063 Iustin Pop
                                     "mtotal", "mnode", "mfree"])
1143 a8083063 Iustin Pop
1144 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
1145 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1146 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1147 a8083063 Iustin Pop
1148 a8083063 Iustin Pop
1149 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1150 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1151 a8083063 Iustin Pop

1152 a8083063 Iustin Pop
    """
1153 a8083063 Iustin Pop
    nodenames = utils.NiceSort(self.cfg.GetNodeList())
1154 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1155 a8083063 Iustin Pop
1156 a8083063 Iustin Pop
1157 a8083063 Iustin Pop
    # begin data gathering
1158 a8083063 Iustin Pop
1159 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1160 a8083063 Iustin Pop
      live_data = {}
1161 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1162 a8083063 Iustin Pop
      for name in nodenames:
1163 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1164 a8083063 Iustin Pop
        if nodeinfo:
1165 a8083063 Iustin Pop
          live_data[name] = {
1166 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1167 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1168 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1169 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1170 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1171 a8083063 Iustin Pop
            }
1172 a8083063 Iustin Pop
        else:
1173 a8083063 Iustin Pop
          live_data[name] = {}
1174 a8083063 Iustin Pop
    else:
1175 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1176 a8083063 Iustin Pop
1177 a8083063 Iustin Pop
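    # count the instances held by each node as primary and as secondary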
    node_to_primary = dict.fromkeys(nodenames, 0)
1178 a8083063 Iustin Pop
    node_to_secondary = dict.fromkeys(nodenames, 0)
1179 a8083063 Iustin Pop
1180 a8083063 Iustin Pop
    if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields:
1181 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1182 a8083063 Iustin Pop
1183 a8083063 Iustin Pop
      for instance in instancelist:
1184 a8083063 Iustin Pop
        instanceinfo = self.cfg.GetInstanceInfo(instance)
1185 a8083063 Iustin Pop
        node_to_primary[instanceinfo.primary_node] += 1
1186 a8083063 Iustin Pop
        for secnode in instanceinfo.secondary_nodes:
1187 a8083063 Iustin Pop
          node_to_secondary[secnode] += 1
1188 a8083063 Iustin Pop
1189 a8083063 Iustin Pop
    # end data gathering
1190 a8083063 Iustin Pop
1191 a8083063 Iustin Pop
    output = []
1192 a8083063 Iustin Pop
    for node in nodelist:
1193 a8083063 Iustin Pop
      node_output = []
1194 a8083063 Iustin Pop
      for field in self.op.output_fields:
1195 a8083063 Iustin Pop
        if field == "name":
1196 a8083063 Iustin Pop
          val = node.name
1197 a8083063 Iustin Pop
        elif field == "pinst":
1198 a8083063 Iustin Pop
          val = node_to_primary[node.name]
1199 a8083063 Iustin Pop
        elif field == "sinst":
1200 a8083063 Iustin Pop
          val = node_to_secondary[node.name]
1201 a8083063 Iustin Pop
        elif field == "pip":
1202 a8083063 Iustin Pop
          val = node.primary_ip
1203 a8083063 Iustin Pop
        elif field == "sip":
1204 a8083063 Iustin Pop
          val = node.secondary_ip
1205 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1206 a8083063 Iustin Pop
          val = live_data[node.name].get(field, "?")
1207 a8083063 Iustin Pop
        else:
1208 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
1209 a8083063 Iustin Pop
        val = str(val)
1210 a8083063 Iustin Pop
        node_output.append(val)
1211 a8083063 Iustin Pop
      output.append(node_output)
1212 a8083063 Iustin Pop
1213 a8083063 Iustin Pop
    return output
1214 a8083063 Iustin Pop
1215 a8083063 Iustin Pop
1216 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1217 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1218 dcb93971 Michael Hanselmann

1219 dcb93971 Michael Hanselmann
  """
1220 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1221 dcb93971 Michael Hanselmann
1222 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1223 dcb93971 Michael Hanselmann
    """Check prerequisites.
1224 dcb93971 Michael Hanselmann

1225 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1226 dcb93971 Michael Hanselmann

1227 dcb93971 Michael Hanselmann
    """
1228 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1229 dcb93971 Michael Hanselmann
1230 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1231 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1232 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1233 dcb93971 Michael Hanselmann
1234 dcb93971 Michael Hanselmann
1235 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1236 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1237 dcb93971 Michael Hanselmann

1238 dcb93971 Michael Hanselmann
    """
1239 dcb93971 Michael Hanselmann
    nodenames = utils.NiceSort([node.name for node in self.nodes])
1240 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1241 dcb93971 Michael Hanselmann
1242 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1243 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1244 dcb93971 Michael Hanselmann
1245 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1246 dcb93971 Michael Hanselmann
1247 dcb93971 Michael Hanselmann
    output = []
1248 dcb93971 Michael Hanselmann
    for node in nodenames:
1249 37d19eb2 Michael Hanselmann
      if node not in volumes or not volumes[node]:
1250 37d19eb2 Michael Hanselmann
        continue
1251 37d19eb2 Michael Hanselmann
1252 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1253 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1254 dcb93971 Michael Hanselmann
1255 dcb93971 Michael Hanselmann
      for vol in node_vols:
1256 dcb93971 Michael Hanselmann
        node_output = []
1257 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1258 dcb93971 Michael Hanselmann
          if field == "node":
1259 dcb93971 Michael Hanselmann
            val = node
1260 dcb93971 Michael Hanselmann
          elif field == "phys":
1261 dcb93971 Michael Hanselmann
            val = vol['dev']
1262 dcb93971 Michael Hanselmann
          elif field == "vg":
1263 dcb93971 Michael Hanselmann
            val = vol['vg']
1264 dcb93971 Michael Hanselmann
          elif field == "name":
1265 dcb93971 Michael Hanselmann
            val = vol['name']
1266 dcb93971 Michael Hanselmann
          elif field == "size":
1267 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1268 dcb93971 Michael Hanselmann
          elif field == "instance":
1269 dcb93971 Michael Hanselmann
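            # find the instance owning this logical volume, if any; the
            # for/else falls through to '-' when no instance claims it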
            for inst in ilist:
1270 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1271 dcb93971 Michael Hanselmann
                continue
1272 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1273 dcb93971 Michael Hanselmann
                val = inst.name
1274 dcb93971 Michael Hanselmann
                break
1275 dcb93971 Michael Hanselmann
            else:
1276 dcb93971 Michael Hanselmann
              val = '-'
1277 dcb93971 Michael Hanselmann
          else:
1278 3ecf6786 Iustin Pop
            raise errors.ParameterError(field)
1279 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1280 dcb93971 Michael Hanselmann
1281 dcb93971 Michael Hanselmann
        output.append(node_output)
1282 dcb93971 Michael Hanselmann
1283 dcb93971 Michael Hanselmann
    return output
1284 dcb93971 Michael Hanselmann
1285 dcb93971 Michael Hanselmann
1286 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1287 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1288 a8083063 Iustin Pop

1289 a8083063 Iustin Pop
  """
1290 a8083063 Iustin Pop
  HPATH = "node-add"
1291 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1292 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1293 a8083063 Iustin Pop
1294 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1295 a8083063 Iustin Pop
    """Build hooks env.
1296 a8083063 Iustin Pop

1297 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1298 a8083063 Iustin Pop

1299 a8083063 Iustin Pop
    """
1300 a8083063 Iustin Pop
    env = {
1301 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1302 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1303 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1304 a8083063 Iustin Pop
      }
1305 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1306 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1307 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1308 a8083063 Iustin Pop
1309 a8083063 Iustin Pop
  def CheckPrereq(self):
1310 a8083063 Iustin Pop
    """Check prerequisites.
1311 a8083063 Iustin Pop

1312 a8083063 Iustin Pop
    This checks:
1313 a8083063 Iustin Pop
     - the new node is not already in the config
1314 a8083063 Iustin Pop
     - it is resolvable
1315 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1316 a8083063 Iustin Pop

1317 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1318 a8083063 Iustin Pop

1319 a8083063 Iustin Pop
    """
1320 a8083063 Iustin Pop
    node_name = self.op.node_name
1321 a8083063 Iustin Pop
    cfg = self.cfg
1322 a8083063 Iustin Pop
1323 a8083063 Iustin Pop
    dns_data = utils.LookupHostname(node_name)
1324 a8083063 Iustin Pop
    if not dns_data:
1325 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node %s is not resolvable" % node_name)
1326 a8083063 Iustin Pop
1327 a8083063 Iustin Pop
    node = dns_data['hostname']
1328 a8083063 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data['ip']
1329 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1330 a8083063 Iustin Pop
    if secondary_ip is None:
1331 a8083063 Iustin Pop
      secondary_ip = primary_ip
1332 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1333 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary IP given")
1334 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1335 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1336 a8083063 Iustin Pop
    if node in node_list:
1337 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node %s is already in the configuration"
1338 3ecf6786 Iustin Pop
                                 % node)
1339 a8083063 Iustin Pop
1340 a8083063 Iustin Pop
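    # make sure neither the primary nor the secondary IP of the new node
    # is already in use by an existing node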
    for existing_node_name in node_list:
1341 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1342 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1343 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1344 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1345 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1346 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("New node ip address(es) conflict with"
1347 3ecf6786 Iustin Pop
                                   " existing node %s" % existing_node.name)
1348 a8083063 Iustin Pop
1349 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1350 a8083063 Iustin Pop
    # same as for the master
1351 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1352 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1353 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1354 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1355 a8083063 Iustin Pop
      if master_singlehomed:
1356 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has no private ip but the"
1357 3ecf6786 Iustin Pop
                                   " new node has one")
1358 a8083063 Iustin Pop
      else:
1359 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The master has a private ip but the"
1360 3ecf6786 Iustin Pop
                                   " new node doesn't have one")
1361 a8083063 Iustin Pop
1362 a8083063 Iustin Pop
    # check reachability
1363 a8083063 Iustin Pop
    command = ["fping", "-q", primary_ip]
1364 a8083063 Iustin Pop
    result = utils.RunCmd(command)
1365 a8083063 Iustin Pop
    if result.failed:
1366 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node not reachable by ping")
1367 a8083063 Iustin Pop
1368 a8083063 Iustin Pop
    if not newbie_singlehomed:
1369 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1370 a8083063 Iustin Pop
      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
1371 a8083063 Iustin Pop
      result = utils.RunCmd(command)
1372 a8083063 Iustin Pop
      if result.failed:
1373 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node secondary ip not reachable by ping")
1374 a8083063 Iustin Pop
1375 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1376 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1377 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1378 a8083063 Iustin Pop
1379 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1380 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1381 a8083063 Iustin Pop

1382 a8083063 Iustin Pop
    """
1383 a8083063 Iustin Pop
    new_node = self.new_node
1384 a8083063 Iustin Pop
    node = new_node.name
1385 a8083063 Iustin Pop
1386 a8083063 Iustin Pop
    # set up inter-node password and certificate and restart the node daemon
1387 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1388 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1389 3ecf6786 Iustin Pop
      raise errors.OpExecError("ganeti password corruption detected")
1390 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1391 a8083063 Iustin Pop
    try:
1392 a8083063 Iustin Pop
      gntpem = f.read(8192)
1393 a8083063 Iustin Pop
    finally:
1394 a8083063 Iustin Pop
      f.close()
1395 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1396 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1397 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1398 a8083063 Iustin Pop
    # parsed by the shell sequence below
1399 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1400 3ecf6786 Iustin Pop
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
1401 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1402 3ecf6786 Iustin Pop
      raise errors.OpExecError("PEM must end with newline")
1403 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1404 a8083063 Iustin Pop
1405 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1406 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1407 a8083063 Iustin Pop
    # either by being constants or by the checks above
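    # the assembled remote command roughly looks like (placeholders only):
    #   umask 077 && echo '<password>' > '<password file>' &&
    #   cat > '<ssl cert file>' << '!EOF.' &&
    #   <PEM data>
    #   !EOF.
    #   <node init script> restart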
1408 a8083063 Iustin Pop
    ss = self.sstore
1409 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1410 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1411 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1412 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1413 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1414 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1415 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
1416 a8083063 Iustin Pop
1417 a8083063 Iustin Pop
    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
1418 a8083063 Iustin Pop
    if result.failed:
1419 3ecf6786 Iustin Pop
      raise errors.OpExecError("Remote command on node %s, error: %s,"
1420 3ecf6786 Iustin Pop
                               " output: %s" %
1421 3ecf6786 Iustin Pop
                               (node, result.fail_reason, result.output))
1422 a8083063 Iustin Pop
1423 a8083063 Iustin Pop
    # check connectivity
1424 a8083063 Iustin Pop
    time.sleep(4)
1425 a8083063 Iustin Pop
1426 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1427 a8083063 Iustin Pop
    if result:
1428 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1429 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1430 a8083063 Iustin Pop
                    (node, result))
1431 a8083063 Iustin Pop
      else:
1432 3ecf6786 Iustin Pop
        raise errors.OpExecError("Version mismatch master version %s,"
1433 3ecf6786 Iustin Pop
                                 " node version %s" %
1434 3ecf6786 Iustin Pop
                                 (constants.PROTOCOL_VERSION, result))
1435 a8083063 Iustin Pop
    else:
1436 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot get version from the new node")
1437 a8083063 Iustin Pop
1438 a8083063 Iustin Pop
    # setup ssh on node
1439 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1440 a8083063 Iustin Pop
    keyarray = []
1441 a8083063 Iustin Pop
    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
1442 a8083063 Iustin Pop
                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
1443 a8083063 Iustin Pop
                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]
1444 a8083063 Iustin Pop
1445 a8083063 Iustin Pop
    for i in keyfiles:
1446 a8083063 Iustin Pop
      f = open(i, 'r')
1447 a8083063 Iustin Pop
      try:
1448 a8083063 Iustin Pop
        keyarray.append(f.read())
1449 a8083063 Iustin Pop
      finally:
1450 a8083063 Iustin Pop
        f.close()
1451 a8083063 Iustin Pop
1452 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1453 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1454 a8083063 Iustin Pop
1455 a8083063 Iustin Pop
    if not result:
1456 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")
1457 a8083063 Iustin Pop
1458 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1459 a8083063 Iustin Pop
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
1460 a8083063 Iustin Pop
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
1461 a8083063 Iustin Pop
                      self.cfg.GetHostKey())
1462 a8083063 Iustin Pop
1463 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1464 a8083063 Iustin Pop
      result = ssh.SSHCall(node, "root",
1465 a8083063 Iustin Pop
                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
1466 a8083063 Iustin Pop
      if result.failed:
1467 3ecf6786 Iustin Pop
        raise errors.OpExecError("Node claims it doesn't have the"
1468 3ecf6786 Iustin Pop
                                 " secondary ip you gave (%s).\n"
1469 3ecf6786 Iustin Pop
                                 "Please fix and re-run this command." %
1470 3ecf6786 Iustin Pop
                                 new_node.secondary_ip)
1471 a8083063 Iustin Pop
1472 ff98055b Iustin Pop
    success, msg = ssh.VerifyNodeHostname(node)
1473 ff98055b Iustin Pop
    if not success:
1474 ff98055b Iustin Pop
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
1475 ff98055b Iustin Pop
                               " than the one the resolver gives: %s.\n"
1476 ff98055b Iustin Pop
                               "Please fix and re-run this command." %
1477 ff98055b Iustin Pop
                               (node, msg))
1478 ff98055b Iustin Pop
1479 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1480 a8083063 Iustin Pop
    # including the node just added
1481 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1482 a8083063 Iustin Pop
    dist_nodes = self.cfg.GetNodeList() + [node]
1483 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1484 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1485 a8083063 Iustin Pop
1486 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1487 82122173 Iustin Pop
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
1488 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1489 a8083063 Iustin Pop
      for to_node in dist_nodes:
1490 a8083063 Iustin Pop
        if not result[to_node]:
1491 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1492 a8083063 Iustin Pop
                       (fname, to_node))
1493 a8083063 Iustin Pop
1494 cb91d46e Iustin Pop
    to_copy = ss.GetFileList()
1495 a8083063 Iustin Pop
    for fname in to_copy:
1496 a8083063 Iustin Pop
      if not ssh.CopyFileToNode(node, fname):
1497 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1498 a8083063 Iustin Pop
1499 a8083063 Iustin Pop
    logger.Info("adding node %s to cluster.conf" % node)
1500 a8083063 Iustin Pop
    self.cfg.AddNode(new_node)
1501 a8083063 Iustin Pop
1502 a8083063 Iustin Pop
1503 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1504 a8083063 Iustin Pop
  """Failover the master node to the current node.
1505 a8083063 Iustin Pop

1506 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1507 a8083063 Iustin Pop

1508 a8083063 Iustin Pop
  """
1509 a8083063 Iustin Pop
  HPATH = "master-failover"
1510 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1511 a8083063 Iustin Pop
  REQ_MASTER = False
1512 a8083063 Iustin Pop
  _OP_REQP = []
1513 a8083063 Iustin Pop
1514 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1515 a8083063 Iustin Pop
    """Build hooks env.
1516 a8083063 Iustin Pop

1517 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1518 a8083063 Iustin Pop
    the nodes in the post phase.
1519 a8083063 Iustin Pop

1520 a8083063 Iustin Pop
    """
1521 a8083063 Iustin Pop
    env = {
1522 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1523 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1524 a8083063 Iustin Pop
      }
1525 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1526 a8083063 Iustin Pop
1527 a8083063 Iustin Pop
  def CheckPrereq(self):
1528 a8083063 Iustin Pop
    """Check prerequisites.
1529 a8083063 Iustin Pop

1530 a8083063 Iustin Pop
    This checks that we are not already the master.
1531 a8083063 Iustin Pop

1532 a8083063 Iustin Pop
    """
1533 a8083063 Iustin Pop
    self.new_master = socket.gethostname()
1534 a8083063 Iustin Pop
1535 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1536 a8083063 Iustin Pop
1537 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1538 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("This commands must be run on the node"
1539 3ecf6786 Iustin Pop
                                 " where you want the new master to be.\n"
1540 3ecf6786 Iustin Pop
                                 "%s is already the master" %
1541 3ecf6786 Iustin Pop
                                 self.old_master)
1542 a8083063 Iustin Pop
1543 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1544 a8083063 Iustin Pop
    """Failover the master node.
1545 a8083063 Iustin Pop

1546 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1547 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1548 a8083063 Iustin Pop
    master.
1549 a8083063 Iustin Pop

1550 a8083063 Iustin Pop
    """
1551 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1552 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1553 a8083063 Iustin Pop
                (self.new_master, self.old_master))
1554 a8083063 Iustin Pop
1555 a8083063 Iustin Pop
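    # the failover happens in three steps: stop the master role on the old
    # master, point the simple store at the new master on all nodes, then
    # start the master role on this node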
    if not rpc.call_node_stop_master(self.old_master):
1556 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1557 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1558 a8083063 Iustin Pop
1559 880478f8 Iustin Pop
    ss = self.sstore
1560 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1561 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1562 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1563 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1564 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1565 880478f8 Iustin Pop
1566 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1567 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1568 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1569 880478f8 Iustin Pop
      feedback_fn("Error in activating the master IP on the new master,\n"
1570 880478f8 Iustin Pop
                  "please fix manually.")
1571 a8083063 Iustin Pop
1572 a8083063 Iustin Pop
1573 a8083063 Iustin Pop
1574 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
1575 a8083063 Iustin Pop
  """Query cluster configuration.
1576 a8083063 Iustin Pop

1577 a8083063 Iustin Pop
  """
1578 a8083063 Iustin Pop
  _OP_REQP = []
1579 59322403 Iustin Pop
  REQ_MASTER = False
1580 a8083063 Iustin Pop
1581 a8083063 Iustin Pop
  def CheckPrereq(self):
1582 a8083063 Iustin Pop
    """No prerequsites needed for this LU.
1583 a8083063 Iustin Pop

1584 a8083063 Iustin Pop
    """
1585 a8083063 Iustin Pop
    pass
1586 a8083063 Iustin Pop
1587 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1588 a8083063 Iustin Pop
    """Return cluster config.
1589 a8083063 Iustin Pop

1590 a8083063 Iustin Pop
    """
1591 a8083063 Iustin Pop
    result = {
1592 5fcdc80d Iustin Pop
      "name": self.sstore.GetClusterName(),
1593 a8083063 Iustin Pop
      "software_version": constants.RELEASE_VERSION,
1594 a8083063 Iustin Pop
      "protocol_version": constants.PROTOCOL_VERSION,
1595 a8083063 Iustin Pop
      "config_version": constants.CONFIG_VERSION,
1596 a8083063 Iustin Pop
      "os_api_version": constants.OS_API_VERSION,
1597 a8083063 Iustin Pop
      "export_version": constants.EXPORT_VERSION,
1598 880478f8 Iustin Pop
      "master": self.sstore.GetMasterNode(),
1599 a8083063 Iustin Pop
      "architecture": (platform.architecture()[0], platform.machine()),
1600 a8083063 Iustin Pop
      }
1601 a8083063 Iustin Pop
1602 a8083063 Iustin Pop
    return result
1603 a8083063 Iustin Pop
1604 a8083063 Iustin Pop
1605 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
1606 a8083063 Iustin Pop
  """Copy file to cluster.
1607 a8083063 Iustin Pop

1608 a8083063 Iustin Pop
  """
1609 a8083063 Iustin Pop
  _OP_REQP = ["nodes", "filename"]
1610 a8083063 Iustin Pop
1611 a8083063 Iustin Pop
  def CheckPrereq(self):
1612 a8083063 Iustin Pop
    """Check prerequisites.
1613 a8083063 Iustin Pop

1614 a8083063 Iustin Pop
    It should check that the named file exists and that the given list
1615 a8083063 Iustin Pop
    of nodes is valid.
1616 a8083063 Iustin Pop

1617 a8083063 Iustin Pop
    """
1618 a8083063 Iustin Pop
    if not os.path.exists(self.op.filename):
1619 a8083063 Iustin Pop
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)
1620 dcb93971 Michael Hanselmann
1621 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1622 a8083063 Iustin Pop
1623 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1624 a8083063 Iustin Pop
    """Copy a file from master to some nodes.
1625 a8083063 Iustin Pop

1626 a8083063 Iustin Pop
    Args:
1627 a8083063 Iustin Pop
      opts - class with options as members
1628 a8083063 Iustin Pop
      args - list containing a single element, the file name
1629 a8083063 Iustin Pop
    Opts used:
1630 a8083063 Iustin Pop
      nodes - list containing the name of target nodes; if empty, all nodes
1631 a8083063 Iustin Pop

1632 a8083063 Iustin Pop
    """
1633 a8083063 Iustin Pop
    filename = self.op.filename
1634 a8083063 Iustin Pop
1635 a8083063 Iustin Pop
    myname = socket.gethostname()
1636 a8083063 Iustin Pop
1637 8bd562f5 Iustin Pop
    for node in [node.name for node in self.nodes]:
1638 a8083063 Iustin Pop
      if node == myname:
1639 a8083063 Iustin Pop
        continue
1640 a8083063 Iustin Pop
      if not ssh.CopyFileToNode(node, filename):
1641 a8083063 Iustin Pop
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
1642 a8083063 Iustin Pop
1643 a8083063 Iustin Pop
1644 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
1645 a8083063 Iustin Pop
  """Return a text-representation of the cluster-config.
1646 a8083063 Iustin Pop

1647 a8083063 Iustin Pop
  """
1648 a8083063 Iustin Pop
  _OP_REQP = []
1649 a8083063 Iustin Pop
1650 a8083063 Iustin Pop
  def CheckPrereq(self):
1651 a8083063 Iustin Pop
    """No prerequisites.
1652 a8083063 Iustin Pop

1653 a8083063 Iustin Pop
    """
1654 a8083063 Iustin Pop
    pass
1655 a8083063 Iustin Pop
1656 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1657 a8083063 Iustin Pop
    """Dump a representation of the cluster config to the standard output.
1658 a8083063 Iustin Pop

1659 a8083063 Iustin Pop
    """
1660 a8083063 Iustin Pop
    return self.cfg.DumpConfig()
1661 a8083063 Iustin Pop
1662 a8083063 Iustin Pop
1663 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
1664 a8083063 Iustin Pop
  """Run a command on some nodes.
1665 a8083063 Iustin Pop

1666 a8083063 Iustin Pop
  """
1667 a8083063 Iustin Pop
  _OP_REQP = ["command", "nodes"]
1668 a8083063 Iustin Pop
1669 a8083063 Iustin Pop
  def CheckPrereq(self):
1670 a8083063 Iustin Pop
    """Check prerequisites.
1671 a8083063 Iustin Pop

1672 a8083063 Iustin Pop
    It checks that the given list of nodes is valid.
1673 a8083063 Iustin Pop

1674 a8083063 Iustin Pop
    """
1675 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1676 a8083063 Iustin Pop
1677 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1678 a8083063 Iustin Pop
    """Run a command on some nodes.
1679 a8083063 Iustin Pop

1680 a8083063 Iustin Pop
    """
1681 a8083063 Iustin Pop
    data = []
1682 a8083063 Iustin Pop
    for node in self.nodes:
1683 02715459 Iustin Pop
      result = ssh.SSHCall(node.name, "root", self.op.command)
1684 02715459 Iustin Pop
      data.append((node.name, result.output, result.exit_code))
1685 a8083063 Iustin Pop
1686 a8083063 Iustin Pop
    return data
1687 a8083063 Iustin Pop
1688 a8083063 Iustin Pop
1689 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1690 a8083063 Iustin Pop
  """Bring up an instance's disks.
1691 a8083063 Iustin Pop

1692 a8083063 Iustin Pop
  """
1693 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1694 a8083063 Iustin Pop
1695 a8083063 Iustin Pop
  def CheckPrereq(self):
1696 a8083063 Iustin Pop
    """Check prerequisites.
1697 a8083063 Iustin Pop

1698 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1699 a8083063 Iustin Pop

1700 a8083063 Iustin Pop
    """
1701 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1702 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1703 a8083063 Iustin Pop
    if instance is None:
1704 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1705 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1706 a8083063 Iustin Pop
    self.instance = instance
1707 a8083063 Iustin Pop
1708 a8083063 Iustin Pop
1709 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1710 a8083063 Iustin Pop
    """Activate the disks.
1711 a8083063 Iustin Pop

1712 a8083063 Iustin Pop
    """
1713 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1714 a8083063 Iustin Pop
    if not disks_ok:
1715 3ecf6786 Iustin Pop
      raise errors.OpExecError("Cannot activate block devices")
1716 a8083063 Iustin Pop
1717 a8083063 Iustin Pop
    return disks_info
1718 a8083063 Iustin Pop
1719 a8083063 Iustin Pop
1720 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
1721 a8083063 Iustin Pop
  """Prepare the block devices for an instance.
1722 a8083063 Iustin Pop

1723 a8083063 Iustin Pop
  This sets up the block devices on all nodes.
1724 a8083063 Iustin Pop

1725 a8083063 Iustin Pop
  Args:
1726 a8083063 Iustin Pop
    instance: a ganeti.objects.Instance object
1727 a8083063 Iustin Pop
    ignore_secondaries: if true, errors on secondary nodes won't result
1728 a8083063 Iustin Pop
                        in an error return from the function
1729 a8083063 Iustin Pop

1730 a8083063 Iustin Pop
  Returns:
1731 a8083063 Iustin Pop
    false if the operation failed
1732 a8083063 Iustin Pop
    list of (host, instance_visible_name, node_visible_name) if the operation
1733 a8083063 Iustin Pop
         succeeded with the mapping from node devices to instance devices
1734 a8083063 Iustin Pop
  """
1735 a8083063 Iustin Pop
  device_info = []
1736 a8083063 Iustin Pop
  disks_ok = True
1737 a8083063 Iustin Pop
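  # assemble each disk on every node of its device tree, remembering the
  # result from the primary node so the caller can map instance devices
  # to node devices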
  for inst_disk in instance.disks:
1738 a8083063 Iustin Pop
    master_result = None
1739 a8083063 Iustin Pop
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
1740 a8083063 Iustin Pop
      cfg.SetDiskID(node_disk, node)
1741 a8083063 Iustin Pop
      is_primary = node == instance.primary_node
1742 a8083063 Iustin Pop
      result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
1743 a8083063 Iustin Pop
      if not result:
1744 a8083063 Iustin Pop
        logger.Error("could not prepare block device %s on node %s (is_pri"
1745 a8083063 Iustin Pop
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
1746 a8083063 Iustin Pop
        if is_primary or not ignore_secondaries:
1747 a8083063 Iustin Pop
          disks_ok = False
1748 a8083063 Iustin Pop
      if is_primary:
1749 a8083063 Iustin Pop
        master_result = result
1750 a8083063 Iustin Pop
    device_info.append((instance.primary_node, inst_disk.iv_name,
1751 a8083063 Iustin Pop
                        master_result))
1752 a8083063 Iustin Pop
1753 a8083063 Iustin Pop
  return disks_ok, device_info
1754 a8083063 Iustin Pop
1755 a8083063 Iustin Pop
1756 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1757 3ecf6786 Iustin Pop
  """Start the disks of an instance.
1758 3ecf6786 Iustin Pop

1759 3ecf6786 Iustin Pop
  """
1760 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1761 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1762 fe7b0351 Michael Hanselmann
  if not disks_ok:
1763 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1764 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1765 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1766 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1767 3ecf6786 Iustin Pop
    raise errors.OpExecError("Disk consistency error")
1768 fe7b0351 Michael Hanselmann
1769 fe7b0351 Michael Hanselmann
1770 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1771 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1772 a8083063 Iustin Pop

1773 a8083063 Iustin Pop
  """
1774 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1775 a8083063 Iustin Pop
1776 a8083063 Iustin Pop
  def CheckPrereq(self):
1777 a8083063 Iustin Pop
    """Check prerequisites.
1778 a8083063 Iustin Pop

1779 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1780 a8083063 Iustin Pop

1781 a8083063 Iustin Pop
    """
1782 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1783 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1784 a8083063 Iustin Pop
    if instance is None:
1785 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1786 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1787 a8083063 Iustin Pop
    self.instance = instance
1788 a8083063 Iustin Pop
1789 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1790 a8083063 Iustin Pop
    """Deactivate the disks
1791 a8083063 Iustin Pop

1792 a8083063 Iustin Pop
    """
1793 a8083063 Iustin Pop
    instance = self.instance
1794 a8083063 Iustin Pop
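    # refuse to shut the disks down while the instance is still reported
    # as running on its primary node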
    ins_l = rpc.call_instance_list([instance.primary_node])
1795 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1796 a8083063 Iustin Pop
    if not isinstance(ins_l, list):
1797 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't contact node '%s'" %
1798 3ecf6786 Iustin Pop
                               instance.primary_node)
1799 a8083063 Iustin Pop
1800 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1801 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance is running, can't shutdown"
1802 3ecf6786 Iustin Pop
                               " block devices.")
1803 a8083063 Iustin Pop
1804 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1805 a8083063 Iustin Pop
1806 a8083063 Iustin Pop
1807 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1808 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1809 a8083063 Iustin Pop

1810 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1811 a8083063 Iustin Pop

1812 a8083063 Iustin Pop
  If ignore_primary is true, errors on the primary node are
1813 a8083063 Iustin Pop
  ignored.
1814 a8083063 Iustin Pop

1815 a8083063 Iustin Pop
  """
1816 a8083063 Iustin Pop
  result = True
1817 a8083063 Iustin Pop
  for disk in instance.disks:
1818 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1819 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1820 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1821 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1822 a8083063 Iustin Pop
                     (disk.iv_name, node))
1823 a8083063 Iustin Pop
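        # only ignore the failure if it happened on the primary node and
        # ignore_primary is set; otherwise mark the shutdown as failed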
        if not ignore_primary or node != instance.primary_node:
1824 a8083063 Iustin Pop
          result = False
1825 a8083063 Iustin Pop
  return result
1826 a8083063 Iustin Pop
1827 a8083063 Iustin Pop
1828 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
1829 a8083063 Iustin Pop
  """Starts an instance.
1830 a8083063 Iustin Pop

1831 a8083063 Iustin Pop
  """
1832 a8083063 Iustin Pop
  HPATH = "instance-start"
1833 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1834 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
1835 a8083063 Iustin Pop
1836 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1837 a8083063 Iustin Pop
    """Build hooks env.
1838 a8083063 Iustin Pop

1839 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1840 a8083063 Iustin Pop

1841 a8083063 Iustin Pop
    """
1842 a8083063 Iustin Pop
    env = {
1843 a8083063 Iustin Pop
      "FORCE": self.op.force,
1844 a8083063 Iustin Pop
      }
1845 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
1846 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1847 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1848 a8083063 Iustin Pop
    return env, nl, nl
1849 a8083063 Iustin Pop
1850 a8083063 Iustin Pop
  def CheckPrereq(self):
1851 a8083063 Iustin Pop
    """Check prerequisites.
1852 a8083063 Iustin Pop

1853 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1854 a8083063 Iustin Pop

1855 a8083063 Iustin Pop
    """
1856 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1857 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1858 a8083063 Iustin Pop
    if instance is None:
1859 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1860 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1861 a8083063 Iustin Pop
1862 a8083063 Iustin Pop
    # check bridge existence
1863 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
1864 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
1865 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("one or more target bridges %s does not"
1866 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
1867 3ecf6786 Iustin Pop
                                 (brlist, instance.primary_node))
1868 a8083063 Iustin Pop
1869 a8083063 Iustin Pop
    self.instance = instance
1870 a8083063 Iustin Pop
    self.op.instance_name = instance.name
1871 a8083063 Iustin Pop
1872 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1873 a8083063 Iustin Pop
    """Start the instance.
1874 a8083063 Iustin Pop

1875 a8083063 Iustin Pop
    """
1876 a8083063 Iustin Pop
    instance = self.instance
1877 a8083063 Iustin Pop
    force = self.op.force
1878 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
1879 a8083063 Iustin Pop
1880 a8083063 Iustin Pop
    node_current = instance.primary_node
1881 a8083063 Iustin Pop
1882 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
1883 a8083063 Iustin Pop
    if not nodeinfo:
1884 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact node %s for infos" %
1885 3ecf6786 Iustin Pop
                               (node_current))
1886 a8083063 Iustin Pop
1887 a8083063 Iustin Pop
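    # refuse to start the instance if the primary node does not report
    # enough free memory to hold it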
    freememory = nodeinfo[node_current]['memory_free']
1888 a8083063 Iustin Pop
    memory = instance.memory
1889 a8083063 Iustin Pop
    if memory > freememory:
1890 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to start instance"
1891 3ecf6786 Iustin Pop
                               " %s on node %s"
1892 3ecf6786 Iustin Pop
                               " needed %s MiB, available %s MiB" %
1893 3ecf6786 Iustin Pop
                               (instance.name, node_current, memory,
1894 3ecf6786 Iustin Pop
                                freememory))
1895 a8083063 Iustin Pop
1896 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
1897 a8083063 Iustin Pop
1898 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
1899 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
1900 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
1901 a8083063 Iustin Pop
1902 a8083063 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
1903 a8083063 Iustin Pop
1904 a8083063 Iustin Pop
1905 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
1906 a8083063 Iustin Pop
  """Shutdown an instance.
1907 a8083063 Iustin Pop

1908 a8083063 Iustin Pop
  """
1909 a8083063 Iustin Pop
  HPATH = "instance-stop"
1910 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1911 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1912 a8083063 Iustin Pop
1913 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1914 a8083063 Iustin Pop
    """Build hooks env.
1915 a8083063 Iustin Pop

1916 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1917 a8083063 Iustin Pop

1918 a8083063 Iustin Pop
    """
1919 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
1920 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1921 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1922 a8083063 Iustin Pop
    return env, nl, nl
1923 a8083063 Iustin Pop
1924 a8083063 Iustin Pop
  def CheckPrereq(self):
1925 a8083063 Iustin Pop
    """Check prerequisites.
1926 a8083063 Iustin Pop

1927 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1928 a8083063 Iustin Pop

1929 a8083063 Iustin Pop
    """
1930 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1931 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1932 a8083063 Iustin Pop
    if instance is None:
1933 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1934 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1935 a8083063 Iustin Pop
    self.instance = instance
1936 a8083063 Iustin Pop
1937 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1938 a8083063 Iustin Pop
    """Shutdown the instance.
1939 a8083063 Iustin Pop

1940 a8083063 Iustin Pop
    """
1941 a8083063 Iustin Pop
    instance = self.instance
1942 a8083063 Iustin Pop
    node_current = instance.primary_node
1943 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
1944 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
1945 a8083063 Iustin Pop
1946 a8083063 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
1947 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1948 a8083063 Iustin Pop
1949 a8083063 Iustin Pop
1950 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
1951 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
1952 fe7b0351 Michael Hanselmann

1953 fe7b0351 Michael Hanselmann
  """
1954 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
1955 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
1956 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
1957 fe7b0351 Michael Hanselmann
1958 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
1959 fe7b0351 Michael Hanselmann
    """Build hooks env.
1960 fe7b0351 Michael Hanselmann

1961 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
1962 fe7b0351 Michael Hanselmann

1963 fe7b0351 Michael Hanselmann
    """
1964 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
1965 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1966 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
1967 fe7b0351 Michael Hanselmann
    return env, nl, nl
1968 fe7b0351 Michael Hanselmann
1969 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
1970 fe7b0351 Michael Hanselmann
    """Check prerequisites.
1971 fe7b0351 Michael Hanselmann

1972 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
1973 fe7b0351 Michael Hanselmann

1974 fe7b0351 Michael Hanselmann
    """
1975 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
1976 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
1977 fe7b0351 Michael Hanselmann
    if instance is None:
1978 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1979 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1980 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
1981 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
1982 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1983 fe7b0351 Michael Hanselmann
    if instance.status != "down":
1984 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
1985 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1986 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
1987 fe7b0351 Michael Hanselmann
    if remote_info:
1988 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
1989 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
1990 3ecf6786 Iustin Pop
                                  instance.primary_node))
1991 d0834de3 Michael Hanselmann
1992 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
1993 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
1994 d0834de3 Michael Hanselmann
      # OS verification
1995 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
1996 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
1997 d0834de3 Michael Hanselmann
      if pnode is None:
1998 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
1999 3ecf6786 Iustin Pop
                                   self.op.pnode)
2000 d0834de3 Michael Hanselmann
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2001 d0834de3 Michael Hanselmann
      if not isinstance(os_obj, objects.OS):
2002 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2003 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2004 d0834de3 Michael Hanselmann
2005 fe7b0351 Michael Hanselmann
    self.instance = instance
2006 fe7b0351 Michael Hanselmann
2007 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2008 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2009 fe7b0351 Michael Hanselmann

2010 fe7b0351 Michael Hanselmann
    """
2011 fe7b0351 Michael Hanselmann
    inst = self.instance
2012 fe7b0351 Michael Hanselmann
2013 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2014 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2015 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2016 d0834de3 Michael Hanselmann
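      # write the modified instance back so the new OS type is saved in
      # the cluster configuration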
      self.cfg.AddInstance(inst)
2017 d0834de3 Michael Hanselmann
2018 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2019 fe7b0351 Michael Hanselmann
    try:
2020 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2021 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2022 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not install OS for instance %s "
2023 3ecf6786 Iustin Pop
                                 "on node %s" %
2024 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2025 fe7b0351 Michael Hanselmann
    finally:
2026 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
2027 fe7b0351 Michael Hanselmann
2028 fe7b0351 Michael Hanselmann
2029 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2030 a8083063 Iustin Pop
  """Remove an instance.
2031 a8083063 Iustin Pop

2032 a8083063 Iustin Pop
  """
2033 a8083063 Iustin Pop
  HPATH = "instance-remove"
2034 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2035 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2036 a8083063 Iustin Pop
2037 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2038 a8083063 Iustin Pop
    """Build hooks env.
2039 a8083063 Iustin Pop

2040 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2041 a8083063 Iustin Pop

2042 a8083063 Iustin Pop
    """
2043 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2044 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2045 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2046 a8083063 Iustin Pop
    return env, nl, nl
2047 a8083063 Iustin Pop
2048 a8083063 Iustin Pop
  def CheckPrereq(self):
2049 a8083063 Iustin Pop
    """Check prerequisites.
2050 a8083063 Iustin Pop

2051 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2052 a8083063 Iustin Pop

2053 a8083063 Iustin Pop
    """
2054 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2055 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2056 a8083063 Iustin Pop
    if instance is None:
2057 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2058 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2059 a8083063 Iustin Pop
    self.instance = instance
2060 a8083063 Iustin Pop
2061 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2062 a8083063 Iustin Pop
    """Remove the instance.
2063 a8083063 Iustin Pop

2064 a8083063 Iustin Pop
    """
2065 a8083063 Iustin Pop
    instance = self.instance
2066 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2067 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2068 a8083063 Iustin Pop
2069 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2070 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2071 3ecf6786 Iustin Pop
                               (instance.name, instance.primary_node))
2072 a8083063 Iustin Pop
2073 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2074 a8083063 Iustin Pop
2075 a8083063 Iustin Pop
    _RemoveDisks(instance, self.cfg)
2076 a8083063 Iustin Pop
2077 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2078 a8083063 Iustin Pop
2079 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2080 a8083063 Iustin Pop
2081 a8083063 Iustin Pop
2082 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2083 a8083063 Iustin Pop
  """Logical unit for querying instances.
2084 a8083063 Iustin Pop

2085 a8083063 Iustin Pop
  """
2086 dcb93971 Michael Hanselmann
  _OP_REQP = ["output_fields"]
2087 a8083063 Iustin Pop
2088 a8083063 Iustin Pop
  def CheckPrereq(self):
2089 a8083063 Iustin Pop
    """Check prerequisites.
2090 a8083063 Iustin Pop

2091 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2092 a8083063 Iustin Pop

2093 a8083063 Iustin Pop
    """
2094 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
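    # The dynamic fields need a live RPC query of the instances' primary
    # nodes; all other (static) fields are answered from the cluster
    # configuration alone.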
2095 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2096 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2097 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2098 644eeef9 Iustin Pop
                               "sda_size", "sdb_size"],
2099 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2100 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2101 a8083063 Iustin Pop
2102 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2103 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2104 a8083063 Iustin Pop

2105 a8083063 Iustin Pop
    """
2106 a8083063 Iustin Pop
    instance_names = utils.NiceSort(self.cfg.GetInstanceList())
2107 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2108 a8083063 Iustin Pop
                     in instance_names]
2109 a8083063 Iustin Pop
2110 a8083063 Iustin Pop
    # begin data gathering
2111 a8083063 Iustin Pop
2112 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2113 a8083063 Iustin Pop
2114 a8083063 Iustin Pop
    bad_nodes = []
2115 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2116 a8083063 Iustin Pop
      live_data = {}
2117 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2118 a8083063 Iustin Pop
      for name in nodes:
2119 a8083063 Iustin Pop
        result = node_data[name]
2120 a8083063 Iustin Pop
        if result:
2121 a8083063 Iustin Pop
          live_data.update(result)
2122 a8083063 Iustin Pop
        elif result == False:
2123 a8083063 Iustin Pop
          bad_nodes.append(name)
2124 a8083063 Iustin Pop
        # else no instance is alive
2125 a8083063 Iustin Pop
    else:
2126 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2127 a8083063 Iustin Pop
2128 a8083063 Iustin Pop
    # end data gathering
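    # At this point live_data maps an instance name to its runtime info for
    # every instance whose primary node answered, while bad_nodes lists the
    # primary nodes that could not be contacted at all.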
2129 a8083063 Iustin Pop
2130 a8083063 Iustin Pop
    output = []
2131 a8083063 Iustin Pop
    for instance in instance_list:
2132 a8083063 Iustin Pop
      iout = []
2133 a8083063 Iustin Pop
      for field in self.op.output_fields:
2134 a8083063 Iustin Pop
        if field == "name":
2135 a8083063 Iustin Pop
          val = instance.name
2136 a8083063 Iustin Pop
        elif field == "os":
2137 a8083063 Iustin Pop
          val = instance.os
2138 a8083063 Iustin Pop
        elif field == "pnode":
2139 a8083063 Iustin Pop
          val = instance.primary_node
2140 a8083063 Iustin Pop
        elif field == "snodes":
2141 a8083063 Iustin Pop
          val = ",".join(instance.secondary_nodes) or "-"
2142 a8083063 Iustin Pop
        elif field == "admin_state":
2143 a8083063 Iustin Pop
          if instance.status == "down":
2144 a8083063 Iustin Pop
            val = "no"
2145 a8083063 Iustin Pop
          else:
2146 a8083063 Iustin Pop
            val = "yes"
2147 a8083063 Iustin Pop
        elif field == "oper_state":
2148 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2149 a8083063 Iustin Pop
            val = "(node down)"
2150 a8083063 Iustin Pop
          else:
2151 a8083063 Iustin Pop
            if live_data.get(instance.name):
2152 a8083063 Iustin Pop
              val = "running"
2153 a8083063 Iustin Pop
            else:
2154 a8083063 Iustin Pop
              val = "stopped"
2155 a8083063 Iustin Pop
        elif field == "admin_ram":
2156 a8083063 Iustin Pop
          val = instance.memory
2157 a8083063 Iustin Pop
        elif field == "oper_ram":
2158 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2159 a8083063 Iustin Pop
            val = "(node down)"
2160 a8083063 Iustin Pop
          elif instance.name in live_data:
2161 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2162 a8083063 Iustin Pop
          else:
2163 a8083063 Iustin Pop
            val = "-"
2164 a8083063 Iustin Pop
        elif field == "disk_template":
2165 a8083063 Iustin Pop
          val = instance.disk_template
2166 a8083063 Iustin Pop
        elif field == "ip":
2167 a8083063 Iustin Pop
          val = instance.nics[0].ip
2168 a8083063 Iustin Pop
        elif field == "bridge":
2169 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2170 a8083063 Iustin Pop
        elif field == "mac":
2171 a8083063 Iustin Pop
          val = instance.nics[0].mac
2172 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2173 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2174 644eeef9 Iustin Pop
          if disk is None:
2175 644eeef9 Iustin Pop
            val = "N/A"
2176 644eeef9 Iustin Pop
          else:
2177 644eeef9 Iustin Pop
            val = disk.size
2178 a8083063 Iustin Pop
        else:
2179 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2180 a8083063 Iustin Pop
        val = str(val)
2181 a8083063 Iustin Pop
        iout.append(val)
2182 a8083063 Iustin Pop
      output.append(iout)
2183 a8083063 Iustin Pop
2184 a8083063 Iustin Pop
    return output
2185 a8083063 Iustin Pop
2186 a8083063 Iustin Pop
2187 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2188 a8083063 Iustin Pop
  """Failover an instance.
2189 a8083063 Iustin Pop

2190 a8083063 Iustin Pop
  """
2191 a8083063 Iustin Pop
  HPATH = "instance-failover"
2192 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2193 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2194 a8083063 Iustin Pop
2195 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2196 a8083063 Iustin Pop
    """Build hooks env.
2197 a8083063 Iustin Pop

2198 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2199 a8083063 Iustin Pop

2200 a8083063 Iustin Pop
    """
2201 a8083063 Iustin Pop
    env = {
2202 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2203 a8083063 Iustin Pop
      }
2204 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2205 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2206 a8083063 Iustin Pop
    return env, nl, nl
2207 a8083063 Iustin Pop
2208 a8083063 Iustin Pop
  def CheckPrereq(self):
2209 a8083063 Iustin Pop
    """Check prerequisites.
2210 a8083063 Iustin Pop

2211 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2212 a8083063 Iustin Pop

2213 a8083063 Iustin Pop
    """
2214 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2215 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2216 a8083063 Iustin Pop
    if instance is None:
2217 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2218 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2219 a8083063 Iustin Pop
2220 2a710df1 Michael Hanselmann
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2221 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2222 2a710df1 Michael Hanselmann
                                 " remote_raid1.")
2223 2a710df1 Michael Hanselmann
2224 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2225 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2226 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2227 2a710df1 Michael Hanselmann
                                   "DT_REMOTE_RAID1 template")
2228 2a710df1 Michael Hanselmann
2229 3a7c308e Guido Trotter
    # check memory requirements on the secondary node
2230 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2231 3a7c308e Guido Trotter
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2232 3a7c308e Guido Trotter
    info = nodeinfo.get(target_node, None)
2233 3a7c308e Guido Trotter
    if not info:
2234 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
2235 3ecf6786 Iustin Pop
                                 " from node '%s'" % nodeinfo)
2236 3a7c308e Guido Trotter
    if instance.memory > info['memory_free']:
2237 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Not enough memory on target node %s."
2238 3ecf6786 Iustin Pop
                                 " %d MB available, %d MB required" %
2239 3ecf6786 Iustin Pop
                                 (target_node, info['memory_free'],
2240 3ecf6786 Iustin Pop
                                  instance.memory))
2241 3a7c308e Guido Trotter
2242 a8083063 Iustin Pop
    # check bridge existence
2243 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2244 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
2245 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2246 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2247 3ecf6786 Iustin Pop
                                 (brlist, instance.primary_node))
2248 a8083063 Iustin Pop
2249 a8083063 Iustin Pop
    self.instance = instance
2250 a8083063 Iustin Pop
2251 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2252 a8083063 Iustin Pop
    """Failover an instance.
2253 a8083063 Iustin Pop

2254 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2255 a8083063 Iustin Pop
    starting it on the secondary.
2256 a8083063 Iustin Pop

2257 a8083063 Iustin Pop
    """
2258 a8083063 Iustin Pop
    instance = self.instance
2259 a8083063 Iustin Pop
2260 a8083063 Iustin Pop
    source_node = instance.primary_node
2261 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2262 a8083063 Iustin Pop
2263 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2264 a8083063 Iustin Pop
    for dev in instance.disks:
2265 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2266 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2267 a8083063 Iustin Pop
        if not self.op.ignore_consistency:
2268 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2269 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2270 a8083063 Iustin Pop
2271 a8083063 Iustin Pop
    feedback_fn("* checking target node resource availability")
2272 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2273 a8083063 Iustin Pop
2274 a8083063 Iustin Pop
    if not nodeinfo:
2275 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact target node %s." %
2276 3ecf6786 Iustin Pop
                               target_node)
2277 a8083063 Iustin Pop
2278 a8083063 Iustin Pop
    free_memory = int(nodeinfo[target_node]['memory_free'])
2279 a8083063 Iustin Pop
    memory = instance.memory
2280 a8083063 Iustin Pop
    if memory > free_memory:
2281 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to create instance %s on"
2282 3ecf6786 Iustin Pop
                               " node %s. needed %s MiB, available %s MiB" %
2283 3ecf6786 Iustin Pop
                               (instance.name, target_node, memory,
2284 3ecf6786 Iustin Pop
                                free_memory))
2285 a8083063 Iustin Pop
2286 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2287 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2288 a8083063 Iustin Pop
                (instance.name, source_node))
2289 a8083063 Iustin Pop
2290 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2291 a8083063 Iustin Pop
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2292 a8083063 Iustin Pop
                   " anyway. Please make sure node %s is down"  %
2293 a8083063 Iustin Pop
                   (instance.name, source_node, source_node))
2294 a8083063 Iustin Pop
2295 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2296 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2297 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2298 a8083063 Iustin Pop
2299 a8083063 Iustin Pop
    instance.primary_node = target_node
2300 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2301 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2302 a8083063 Iustin Pop
2303 a8083063 Iustin Pop
    feedback_fn("* activating the instance's disks on target node")
2304 a8083063 Iustin Pop
    logger.Info("Starting instance %s on node %s" %
2305 a8083063 Iustin Pop
                (instance.name, target_node))
2306 a8083063 Iustin Pop
2307 a8083063 Iustin Pop
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2308 a8083063 Iustin Pop
                                             ignore_secondaries=True)
2309 a8083063 Iustin Pop
    if not disks_ok:
2310 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2311 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't activate the instance's disks")
2312 a8083063 Iustin Pop
2313 a8083063 Iustin Pop
    feedback_fn("* starting the instance on the target node")
2314 a8083063 Iustin Pop
    if not rpc.call_instance_start(target_node, instance, None):
2315 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2316 a8083063 Iustin Pop
      raise errors.OpExecError("Could not start instance %s on node %s." %
2317 d0b3526f Michael Hanselmann
                               (instance.name, target_node))
2318 a8083063 Iustin Pop
2319 a8083063 Iustin Pop
2320 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnPrimary(cfg, node, device, info):
2321 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2322 a8083063 Iustin Pop

2323 a8083063 Iustin Pop
  This always creates all devices.
2324 a8083063 Iustin Pop

2325 a8083063 Iustin Pop
  """
2326 a8083063 Iustin Pop
  if device.children:
2327 a8083063 Iustin Pop
    for child in device.children:
2328 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, node, child, info):
2329 a8083063 Iustin Pop
        return False
2330 a8083063 Iustin Pop
2331 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2332 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
2333 a8083063 Iustin Pop
  if not new_id:
2334 a8083063 Iustin Pop
    return False
2335 a8083063 Iustin Pop
  if device.physical_id is None:
2336 a8083063 Iustin Pop
    device.physical_id = new_id
2337 a8083063 Iustin Pop
  return True
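# Unlike _CreateBlockDevOnSecondary below, this helper unconditionally
# creates every device in the tree, since the primary node always needs the
# full block device stack assembled.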
2338 a8083063 Iustin Pop
2339 a8083063 Iustin Pop
2340 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
2341 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2342 a8083063 Iustin Pop

2343 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2344 a8083063 Iustin Pop
  all its children.
2345 a8083063 Iustin Pop

2346 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2347 a8083063 Iustin Pop

2348 a8083063 Iustin Pop
  """
2349 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2350 a8083063 Iustin Pop
    force = True
2351 a8083063 Iustin Pop
  if device.children:
2352 a8083063 Iustin Pop
    for child in device.children:
2353 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, node, child, force, info):
2354 a8083063 Iustin Pop
        return False
2355 a8083063 Iustin Pop
2356 a8083063 Iustin Pop
  if not force:
2357 a8083063 Iustin Pop
    return True
2358 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2359 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
2360 a8083063 Iustin Pop
  if not new_id:
2361 a8083063 Iustin Pop
    return False
2362 a8083063 Iustin Pop
  if device.physical_id is None:
2363 a8083063 Iustin Pop
    device.physical_id = new_id
2364 a8083063 Iustin Pop
  return True
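# The 'force' flag starts out False for the top-level call (see _CreateDisks
# below) and is switched to True as soon as a device type reports
# CreateOnSecondary(), so everything underneath such a device is physically
# created on the secondary as well.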
2365 a8083063 Iustin Pop
2366 a8083063 Iustin Pop
2367 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2368 923b1523 Iustin Pop
  """Generate a suitable LV name.
2369 923b1523 Iustin Pop

2370 923b1523 Iustin Pop
  This will generate a unique logical volume name for each given extension.
2371 923b1523 Iustin Pop

2372 923b1523 Iustin Pop
  """
2373 923b1523 Iustin Pop
  results = []
2374 923b1523 Iustin Pop
  for val in exts:
2375 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2376 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2377 923b1523 Iustin Pop
  return results
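# Illustrative example: _GenerateUniqueNames(cfg, [".sda", ".sdb"]) returns a
# list of the form ["<unique-id>.sda", "<unique-id>.sdb"], i.e. one
# cfg.GenerateUniqueID() result per extension, with the extension appended
# verbatim.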
2378 923b1523 Iustin Pop
2379 923b1523 Iustin Pop
2380 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
2381 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
2382 a8083063 Iustin Pop

2383 a8083063 Iustin Pop
  """
2384 a8083063 Iustin Pop
  port = cfg.AllocatePort()
2385 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2386 a8083063 Iustin Pop
  dev_data = objects.Disk(dev_type="lvm", size=size,
2387 923b1523 Iustin Pop
                          logical_id=(vgname, names[0]))
2388 a8083063 Iustin Pop
  dev_meta = objects.Disk(dev_type="lvm", size=128,
2389 923b1523 Iustin Pop
                          logical_id=(vgname, names[1]))
2390 a8083063 Iustin Pop
  drbd_dev = objects.Disk(dev_type="drbd", size=size,
2391 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
2392 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
2393 a8083063 Iustin Pop
  return drbd_dev
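# The tree built above is a single "drbd" device whose children are the data
# LV (of the requested size) and a fixed 128 MB metadata LV; the logical_id
# carries the two nodes and the allocated port for the DRBD connection.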
2394 a8083063 Iustin Pop
2395 a8083063 Iustin Pop
2396 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2397 a8083063 Iustin Pop
                          instance_name, primary_node,
2398 a8083063 Iustin Pop
                          secondary_nodes, disk_sz, swap_sz):
2399 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2400 a8083063 Iustin Pop

2401 a8083063 Iustin Pop
  """
2402 a8083063 Iustin Pop
  #TODO: compute space requirements
2403 a8083063 Iustin Pop
2404 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2405 a8083063 Iustin Pop
  if template_name == "diskless":
2406 a8083063 Iustin Pop
    disks = []
2407 a8083063 Iustin Pop
  elif template_name == "plain":
2408 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2409 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2410 923b1523 Iustin Pop
2411 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2412 a8083063 Iustin Pop
    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
2413 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2414 a8083063 Iustin Pop
                           iv_name = "sda")
2415 a8083063 Iustin Pop
    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
2416 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2417 a8083063 Iustin Pop
                           iv_name = "sdb")
2418 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2419 a8083063 Iustin Pop
  elif template_name == "local_raid1":
2420 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2421 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2422 923b1523 Iustin Pop
2423 923b1523 Iustin Pop
2424 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
2425 923b1523 Iustin Pop
                                       ".sdb_m1", ".sdb_m2"])
2426 a8083063 Iustin Pop
    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
2427 923b1523 Iustin Pop
                              logical_id=(vgname, names[0]))
2428 a8083063 Iustin Pop
    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
2429 923b1523 Iustin Pop
                              logical_id=(vgname, names[1]))
2430 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
2431 a8083063 Iustin Pop
                              size=disk_sz,
2432 a8083063 Iustin Pop
                              children = [sda_dev_m1, sda_dev_m2])
2433 a8083063 Iustin Pop
    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
2434 923b1523 Iustin Pop
                              logical_id=(vgname, names[2]))
2435 a8083063 Iustin Pop
    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
2436 923b1523 Iustin Pop
                              logical_id=(vgname, names[3]))
2437 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
2438 a8083063 Iustin Pop
                              size=swap_sz,
2439 a8083063 Iustin Pop
                              children = [sdb_dev_m1, sdb_dev_m2])
2440 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2441 2a710df1 Michael Hanselmann
  elif template_name == constants.DT_REMOTE_RAID1:
2442 a8083063 Iustin Pop
    if len(secondary_nodes) != 1:
2443 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2444 a8083063 Iustin Pop
    remote_node = secondary_nodes[0]
2445 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2446 923b1523 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2447 923b1523 Iustin Pop
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2448 923b1523 Iustin Pop
                                         disk_sz, names[0:2])
2449 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
2450 a8083063 Iustin Pop
                              children = [drbd_sda_dev], size=disk_sz)
2451 923b1523 Iustin Pop
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2452 923b1523 Iustin Pop
                                         swap_sz, names[2:4])
2453 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
2454 a8083063 Iustin Pop
                              children = [drbd_sdb_dev], size=swap_sz)
2455 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2456 a8083063 Iustin Pop
  else:
2457 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2458 a8083063 Iustin Pop
  return disks
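# Summary of the layouts built above: "diskless" has no block devices,
# "plain" is a pair of simple LVs (sda and sdb), "local_raid1" is an MD RAID1
# over two local LVs per disk, and remote_raid1 is an MD RAID1 over a single
# DRBD device that mirrors each disk to the secondary node.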
2459 a8083063 Iustin Pop
2460 a8083063 Iustin Pop
2461 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2462 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2463 3ecf6786 Iustin Pop

2464 3ecf6786 Iustin Pop
  """
2465 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2466 a0c3fea1 Michael Hanselmann
2467 a0c3fea1 Michael Hanselmann
2468 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2469 a8083063 Iustin Pop
  """Create all disks for an instance.
2470 a8083063 Iustin Pop

2471 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2472 a8083063 Iustin Pop

2473 a8083063 Iustin Pop
  Args:
2474 a8083063 Iustin Pop
    instance: the instance object
2475 a8083063 Iustin Pop

2476 a8083063 Iustin Pop
  Returns:
2477 a8083063 Iustin Pop
    True or False showing the success of the creation process
2478 a8083063 Iustin Pop

2479 a8083063 Iustin Pop
  """
2480 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2481 a0c3fea1 Michael Hanselmann
2482 a8083063 Iustin Pop
  for device in instance.disks:
2483 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2484 a8083063 Iustin Pop
              (device.iv_name, instance.name))
2485 a8083063 Iustin Pop
    #HARDCODE
2486 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2487 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False,
2488 a0c3fea1 Michael Hanselmann
                                        info):
2489 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2490 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2491 a8083063 Iustin Pop
        return False
2492 a8083063 Iustin Pop
    #HARDCODE
2493 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info):
2494 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2495 a8083063 Iustin Pop
                   device.iv_name)
2496 a8083063 Iustin Pop
      return False
2497 a8083063 Iustin Pop
  return True
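# Note the ordering above: for every disk the device tree is first created on
# all secondary nodes and only afterwards on the primary node.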
2498 a8083063 Iustin Pop
2499 a8083063 Iustin Pop
2500 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2501 a8083063 Iustin Pop
  """Remove all disks for an instance.
2502 a8083063 Iustin Pop

2503 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2504 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2505 a8083063 Iustin Pop
  be removed, the removal will continue with the other ones (compare
2506 a8083063 Iustin Pop
  with `_CreateDisks()`).
2507 a8083063 Iustin Pop

2508 a8083063 Iustin Pop
  Args:
2509 a8083063 Iustin Pop
    instance: the instance object
2510 a8083063 Iustin Pop

2511 a8083063 Iustin Pop
  Returns:
2512 a8083063 Iustin Pop
    True or False showing the success of the removal process
2513 a8083063 Iustin Pop

2514 a8083063 Iustin Pop
  """
2515 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2516 a8083063 Iustin Pop
2517 a8083063 Iustin Pop
  result = True
2518 a8083063 Iustin Pop
  for device in instance.disks:
2519 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2520 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2521 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2522 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2523 a8083063 Iustin Pop
                     " continuing anyway" %
2524 a8083063 Iustin Pop
                     (device.iv_name, node))
2525 a8083063 Iustin Pop
        result = False
2526 a8083063 Iustin Pop
  return result
2527 a8083063 Iustin Pop
2528 a8083063 Iustin Pop
2529 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2530 a8083063 Iustin Pop
  """Create an instance.
2531 a8083063 Iustin Pop

2532 a8083063 Iustin Pop
  """
2533 a8083063 Iustin Pop
  HPATH = "instance-add"
2534 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2535 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
2536 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2537 a8083063 Iustin Pop
              "wait_for_sync"]
2538 a8083063 Iustin Pop
2539 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2540 a8083063 Iustin Pop
    """Build hooks env.
2541 a8083063 Iustin Pop

2542 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2543 a8083063 Iustin Pop

2544 a8083063 Iustin Pop
    """
2545 a8083063 Iustin Pop
    env = {
2546 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2547 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2548 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2549 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2550 a8083063 Iustin Pop
      }
2551 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2552 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2553 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2554 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2555 396e1b78 Michael Hanselmann
2556 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
2557 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
2558 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
2559 396e1b78 Michael Hanselmann
      status=self.instance_status,
2560 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
2561 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
2562 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
2563 396e1b78 Michael Hanselmann
      nics=[(self.inst_ip, self.op.bridge)],
2564 396e1b78 Michael Hanselmann
    ))
2565 a8083063 Iustin Pop
2566 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2567 a8083063 Iustin Pop
          self.secondaries)
2568 a8083063 Iustin Pop
    return env, nl, nl
2569 a8083063 Iustin Pop
2570 a8083063 Iustin Pop
2571 a8083063 Iustin Pop
  def CheckPrereq(self):
2572 a8083063 Iustin Pop
    """Check prerequisites.
2573 a8083063 Iustin Pop

2574 a8083063 Iustin Pop
    """
2575 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2576 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2577 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
2578 3ecf6786 Iustin Pop
                                 self.op.mode)
2579 a8083063 Iustin Pop
2580 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2581 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2582 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2583 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2584 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
2585 3ecf6786 Iustin Pop
                                   " node and path options")
2586 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2587 a8083063 Iustin Pop
      if src_node_full is None:
2588 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
2589 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2590 a8083063 Iustin Pop
2591 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2592 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
2593 a8083063 Iustin Pop
2594 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2595 a8083063 Iustin Pop
2596 a8083063 Iustin Pop
      if not export_info:
2597 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
2598 a8083063 Iustin Pop
2599 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2600 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
2601 a8083063 Iustin Pop
2602 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
2603 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
2604 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
2605 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
2606 a8083063 Iustin Pop
2607 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
2608 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
2609 3ecf6786 Iustin Pop
                                   " one data disk")
2610 a8083063 Iustin Pop
2611 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
2612 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
2613 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
2614 a8083063 Iustin Pop
                                                         'disk0_dump'))
2615 a8083063 Iustin Pop
      self.src_image = diskimage
2616 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
2617 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
2618 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
2619 a8083063 Iustin Pop
2620 a8083063 Iustin Pop
    # check primary node
2621 a8083063 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
2622 a8083063 Iustin Pop
    if pnode is None:
2623 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
2624 3ecf6786 Iustin Pop
                                 self.op.pnode)
2625 a8083063 Iustin Pop
    self.op.pnode = pnode.name
2626 a8083063 Iustin Pop
    self.pnode = pnode
2627 a8083063 Iustin Pop
    self.secondaries = []
2628 a8083063 Iustin Pop
    # disk template and mirror node verification
2629 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
2630 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
2631 a8083063 Iustin Pop
2632 a8083063 Iustin Pop
    if self.op.disk_template == constants.DT_REMOTE_RAID1:
2633 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
2634 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
2635 3ecf6786 Iustin Pop
                                   " a mirror node")
2636 a8083063 Iustin Pop
2637 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
2638 a8083063 Iustin Pop
      if snode_name is None:
2639 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
2640 3ecf6786 Iustin Pop
                                   self.op.snode)
2641 a8083063 Iustin Pop
      elif snode_name == pnode.name:
2642 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
2643 3ecf6786 Iustin Pop
                                   " the primary node.")
2644 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
2645 a8083063 Iustin Pop
2646 ed1ebc60 Guido Trotter
    # Check lv size requirements
2647 ed1ebc60 Guido Trotter
    nodenames = [pnode.name] + self.secondaries
2648 ed1ebc60 Guido Trotter
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
2649 ed1ebc60 Guido Trotter
2650 ed1ebc60 Guido Trotter
    # Required free disk space as a function of disk and swap space
2651 ed1ebc60 Guido Trotter
    req_size_dict = {
2652 ed1ebc60 Guido Trotter
      constants.DT_DISKLESS: 0,
2653 ed1ebc60 Guido Trotter
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
2654 ed1ebc60 Guido Trotter
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
2655 ed1ebc60 Guido Trotter
      # 256 MB are added for drbd metadata, 128MB for each drbd device
2656 ed1ebc60 Guido Trotter
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
2657 ed1ebc60 Guido Trotter
    }
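    # Worked example (illustrative sizes only): a remote_raid1 instance with
    # disk_size=10240 MB and swap_size=4096 MB requires
    # 10240 + 4096 + 256 = 14592 MB of free space in the volume group on the
    # primary node and on the secondary node.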
2658 ed1ebc60 Guido Trotter
2659 ed1ebc60 Guido Trotter
    if self.op.disk_template not in req_size_dict:
2660 3ecf6786 Iustin Pop
      raise errors.ProgrammerError("Disk template '%s' size requirement"
2661 3ecf6786 Iustin Pop
                                   " is unknown" %  self.op.disk_template)
2662 ed1ebc60 Guido Trotter
2663 ed1ebc60 Guido Trotter
    req_size = req_size_dict[self.op.disk_template]
2664 ed1ebc60 Guido Trotter
2665 ed1ebc60 Guido Trotter
    for node in nodenames:
2666 ed1ebc60 Guido Trotter
      info = nodeinfo.get(node, None)
2667 ed1ebc60 Guido Trotter
      if not info:
2668 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
2669 3ecf6786 Iustin Pop
                                   " from node '%s'" % nodeinfo)
2670 ed1ebc60 Guido Trotter
      if req_size > info['vg_free']:
2671 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s."
2672 3ecf6786 Iustin Pop
                                   " %d MB available, %d MB required" %
2673 3ecf6786 Iustin Pop
                                   (node, info['vg_free'], req_size))
2674 ed1ebc60 Guido Trotter
2675 a8083063 Iustin Pop
    # os verification
2676 a8083063 Iustin Pop
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2677 a8083063 Iustin Pop
    if not isinstance(os_obj, objects.OS):
2678 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
2679 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
2680 a8083063 Iustin Pop
2681 a8083063 Iustin Pop
    # instance verification
2682 a8083063 Iustin Pop
    hostname1 = utils.LookupHostname(self.op.instance_name)
2683 a8083063 Iustin Pop
    if not hostname1:
2684 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance name '%s' not found in dns" %
2685 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2686 a8083063 Iustin Pop
2687 a8083063 Iustin Pop
    self.op.instance_name = instance_name = hostname1['hostname']
2688 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2689 a8083063 Iustin Pop
    if instance_name in instance_list:
2690 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2691 3ecf6786 Iustin Pop
                                 instance_name)
2692 a8083063 Iustin Pop
2693 a8083063 Iustin Pop
    ip = getattr(self.op, "ip", None)
2694 a8083063 Iustin Pop
    if ip is None or ip.lower() == "none":
2695 a8083063 Iustin Pop
      inst_ip = None
2696 a8083063 Iustin Pop
    elif ip.lower() == "auto":
2697 a8083063 Iustin Pop
      inst_ip = hostname1['ip']
2698 a8083063 Iustin Pop
    else:
2699 a8083063 Iustin Pop
      if not utils.IsValidIP(ip):
2700 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
2701 3ecf6786 Iustin Pop
                                   " like a valid IP" % ip)
2702 a8083063 Iustin Pop
      inst_ip = ip
2703 a8083063 Iustin Pop
    self.inst_ip = inst_ip
2704 a8083063 Iustin Pop
2705 a8083063 Iustin Pop
    command = ["fping", "-q", hostname1['ip']]
2706 a8083063 Iustin Pop
    result = utils.RunCmd(command)
2707 a8083063 Iustin Pop
    if not result.failed:
2708 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
2709 3ecf6786 Iustin Pop
                                 (hostname1['ip'], instance_name))
2710 a8083063 Iustin Pop
2711 a8083063 Iustin Pop
    # bridge verification
2712 a8083063 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
2713 a8083063 Iustin Pop
    if bridge is None:
2714 a8083063 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
2715 a8083063 Iustin Pop
    else:
2716 a8083063 Iustin Pop
      self.op.bridge = bridge
2717 a8083063 Iustin Pop
2718 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
2719 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
2720 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
2721 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
2722 a8083063 Iustin Pop
2723 a8083063 Iustin Pop
    if self.op.start:
2724 a8083063 Iustin Pop
      self.instance_status = 'up'
2725 a8083063 Iustin Pop
    else:
2726 a8083063 Iustin Pop
      self.instance_status = 'down'
2727 a8083063 Iustin Pop
2728 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2729 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
2730 a8083063 Iustin Pop

2731 a8083063 Iustin Pop
    """
2732 a8083063 Iustin Pop
    instance = self.op.instance_name
2733 a8083063 Iustin Pop
    pnode_name = self.pnode.name
2734 a8083063 Iustin Pop
2735 a8083063 Iustin Pop
    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
2736 a8083063 Iustin Pop
    if self.inst_ip is not None:
2737 a8083063 Iustin Pop
      nic.ip = self.inst_ip
2738 a8083063 Iustin Pop
2739 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
2740 a8083063 Iustin Pop
                                  self.op.disk_template,
2741 a8083063 Iustin Pop
                                  instance, pnode_name,
2742 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
2743 a8083063 Iustin Pop
                                  self.op.swap_size)
2744 a8083063 Iustin Pop
2745 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
2746 a8083063 Iustin Pop
                            primary_node=pnode_name,
2747 a8083063 Iustin Pop
                            memory=self.op.mem_size,
2748 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
2749 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
2750 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
2751 a8083063 Iustin Pop
                            status=self.instance_status,
2752 a8083063 Iustin Pop
                            )
2753 a8083063 Iustin Pop
2754 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
2755 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
2756 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2757 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
2758 a8083063 Iustin Pop
2759 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
2760 a8083063 Iustin Pop
2761 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
2762 a8083063 Iustin Pop
2763 a8083063 Iustin Pop
    if self.op.wait_for_sync:
2764 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj)
2765 2a710df1 Michael Hanselmann
    elif iobj.disk_template == constants.DT_REMOTE_RAID1:
2766 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
2767 a8083063 Iustin Pop
      time.sleep(15)
2768 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
2769 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
2770 a8083063 Iustin Pop
    else:
2771 a8083063 Iustin Pop
      disk_abort = False
2772 a8083063 Iustin Pop
2773 a8083063 Iustin Pop
    if disk_abort:
2774 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2775 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
2776 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
2777 3ecf6786 Iustin Pop
                               " this instance")
2778 a8083063 Iustin Pop
2779 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
2780 a8083063 Iustin Pop
                (instance, pnode_name))
2781 a8083063 Iustin Pop
2782 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
2783 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
2784 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
2785 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
2786 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
2787 3ecf6786 Iustin Pop
                                   " on node %s" %
2788 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
2789 a8083063 Iustin Pop
2790 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
2791 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
2792 a8083063 Iustin Pop
        src_node = self.op.src_node
2793 a8083063 Iustin Pop
        src_image = self.src_image
2794 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
2795 a8083063 Iustin Pop
                                                src_node, src_image):
2796 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
2797 3ecf6786 Iustin Pop
                                   " %s on node %s" %
2798 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
2799 a8083063 Iustin Pop
      else:
2800 a8083063 Iustin Pop
        # also checked in the prereq part
2801 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
2802 3ecf6786 Iustin Pop
                                     % self.op.mode)
2803 a8083063 Iustin Pop
2804 a8083063 Iustin Pop
    if self.op.start:
2805 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
2806 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
2807 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
2808 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
2809 a8083063 Iustin Pop
2810 a8083063 Iustin Pop
2811 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
2812 a8083063 Iustin Pop
  """Connect to an instance's console.
2813 a8083063 Iustin Pop

2814 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
2815 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
2816 a8083063 Iustin Pop
  console.
2817 a8083063 Iustin Pop

2818 a8083063 Iustin Pop
  """
2819 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2820 a8083063 Iustin Pop
2821 a8083063 Iustin Pop
  def CheckPrereq(self):
2822 a8083063 Iustin Pop
    """Check prerequisites.
2823 a8083063 Iustin Pop

2824 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2825 a8083063 Iustin Pop

2826 a8083063 Iustin Pop
    """
2827 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2828 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2829 a8083063 Iustin Pop
    if instance is None:
2830 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2831 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2832 a8083063 Iustin Pop
    self.instance = instance
2833 a8083063 Iustin Pop
2834 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2835 a8083063 Iustin Pop
    """Connect to the console of an instance
2836 a8083063 Iustin Pop

2837 a8083063 Iustin Pop
    """
2838 a8083063 Iustin Pop
    instance = self.instance
2839 a8083063 Iustin Pop
    node = instance.primary_node
2840 a8083063 Iustin Pop
2841 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
2842 a8083063 Iustin Pop
    if node_insts is False:
2843 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
2844 a8083063 Iustin Pop
2845 a8083063 Iustin Pop
    if instance.name not in node_insts:
2846 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
2847 a8083063 Iustin Pop
2848 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
2849 a8083063 Iustin Pop
2850 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
2851 a8083063 Iustin Pop
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
2852 82122173 Iustin Pop
    # build ssh cmdline
2853 82122173 Iustin Pop
    argv = ["ssh", "-q", "-t"]
2854 82122173 Iustin Pop
    argv.extend(ssh.KNOWN_HOSTS_OPTS)
2855 82122173 Iustin Pop
    argv.extend(ssh.BATCH_MODE_OPTS)
2856 82122173 Iustin Pop
    argv.append(node)
2857 82122173 Iustin Pop
    argv.append(console_cmd)
2858 82122173 Iustin Pop
    return "ssh", argv
2859 a8083063 Iustin Pop
2860 a8083063 Iustin Pop
2861 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
2862 a8083063 Iustin Pop
  """Adda new mirror member to an instance's disk.
2863 a8083063 Iustin Pop

2864 a8083063 Iustin Pop
  """
2865 a8083063 Iustin Pop
  HPATH = "mirror-add"
2866 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2867 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
2868 a8083063 Iustin Pop
2869 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2870 a8083063 Iustin Pop
    """Build hooks env.
2871 a8083063 Iustin Pop

2872 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2873 a8083063 Iustin Pop

2874 a8083063 Iustin Pop
    """
2875 a8083063 Iustin Pop
    env = {
2876 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
2877 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
2878 a8083063 Iustin Pop
      }
2879 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2880 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
2881 a8083063 Iustin Pop
          self.op.remote_node,] + list(self.instance.secondary_nodes)
2882 a8083063 Iustin Pop
    return env, nl, nl
2883 a8083063 Iustin Pop
2884 a8083063 Iustin Pop
  def CheckPrereq(self):
2885 a8083063 Iustin Pop
    """Check prerequisites.
2886 a8083063 Iustin Pop

2887 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2888 a8083063 Iustin Pop

2889 a8083063 Iustin Pop
    """
2890 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2891 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2892 a8083063 Iustin Pop
    if instance is None:
2893 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2894 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2895 a8083063 Iustin Pop
    self.instance = instance
2896 a8083063 Iustin Pop
2897 a8083063 Iustin Pop
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
2898 a8083063 Iustin Pop
    if remote_node is None:
2899 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
2900 a8083063 Iustin Pop
    self.remote_node = remote_node
2901 a8083063 Iustin Pop
2902 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
2903 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
2904 3ecf6786 Iustin Pop
                                 " the instance.")
2905 a8083063 Iustin Pop
2906 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2907 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
2908 3ecf6786 Iustin Pop
                                 " remote_raid1.")
2909 a8083063 Iustin Pop
    for disk in instance.disks:
2910 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
2911 a8083063 Iustin Pop
        break
2912 a8083063 Iustin Pop
    else:
2913 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
2914 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
2915 a8083063 Iustin Pop
    if len(disk.children) > 1:
2916 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The device already has two slave"
2917 3ecf6786 Iustin Pop
                                 " devices.\n"
2918 3ecf6786 Iustin Pop
                                 "This would create a 3-disk raid1"
2919 3ecf6786 Iustin Pop
                                 " which we don't allow.")
2920 a8083063 Iustin Pop
    self.disk = disk
2921 a8083063 Iustin Pop
2922 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2923 a8083063 Iustin Pop
    """Add the mirror component
2924 a8083063 Iustin Pop

2925 a8083063 Iustin Pop
    """
2926 a8083063 Iustin Pop
    disk = self.disk
2927 a8083063 Iustin Pop
    instance = self.instance
2928 a8083063 Iustin Pop
2929 a8083063 Iustin Pop
    remote_node = self.remote_node
2930 923b1523 Iustin Pop
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
2931 923b1523 Iustin Pop
    names = _GenerateUniqueNames(self.cfg, lv_names)
2932 923b1523 Iustin Pop
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
2933 923b1523 Iustin Pop
                                     remote_node, disk.size, names)
2934 a8083063 Iustin Pop
2935 a8083063 Iustin Pop
    logger.Info("adding new mirror component on secondary")
2936 a8083063 Iustin Pop
    #HARDCODE
2937 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
2938 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
2939 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create new component on secondary"
2940 3ecf6786 Iustin Pop
                               " node %s" % remote_node)
2941 a8083063 Iustin Pop
2942 a8083063 Iustin Pop
    logger.Info("adding new mirror component on primary")
2943 a8083063 Iustin Pop
    #HARDCODE
2944 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
2945 a0c3fea1 Michael Hanselmann
                                    _GetInstanceInfoText(instance)):
2946 a8083063 Iustin Pop
      # remove secondary dev
2947 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2948 a8083063 Iustin Pop
      rpc.call_blockdev_remove(remote_node, new_drbd)
2949 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create volume on primary")
2950 a8083063 Iustin Pop
2951 a8083063 Iustin Pop
    # the device exists now
2952 a8083063 Iustin Pop
    # call the primary node to add the mirror to md
2953 a8083063 Iustin Pop
    logger.Info("adding new mirror component to md")
2954 a8083063 Iustin Pop
    if not rpc.call_blockdev_addchild(instance.primary_node,
2955 a8083063 Iustin Pop
                                      disk, new_drbd):
2956 a8083063 Iustin Pop
      logger.Error("Can't add mirror compoment to md!")
2957 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2958 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
2959 a8083063 Iustin Pop
        logger.Error("Can't rollback on secondary")
2960 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
2961 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
2962 a8083063 Iustin Pop
        logger.Error("Can't rollback on primary")
2963 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't add mirror component to md array")
2964 a8083063 Iustin Pop
2965 a8083063 Iustin Pop
    disk.children.append(new_drbd)
2966 a8083063 Iustin Pop
2967 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2968 a8083063 Iustin Pop
2969 a8083063 Iustin Pop
    _WaitForSync(self.cfg, instance)
2970 a8083063 Iustin Pop
2971 a8083063 Iustin Pop
    return 0
2972 a8083063 Iustin Pop
2973 a8083063 Iustin Pop
2974 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
2975 a8083063 Iustin Pop
  """Remove a component from a remote_raid1 disk.
2976 a8083063 Iustin Pop

2977 a8083063 Iustin Pop
  """
2978 a8083063 Iustin Pop
  HPATH = "mirror-remove"
2979 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2980 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]
2981 a8083063 Iustin Pop
2982 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2983 a8083063 Iustin Pop
    """Build hooks env.
2984 a8083063 Iustin Pop

2985 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2986 a8083063 Iustin Pop

2987 a8083063 Iustin Pop
    """
2988 a8083063 Iustin Pop
    env = {
2989 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
2990 a8083063 Iustin Pop
      "DISK_ID": self.op.disk_id,
2991 a8083063 Iustin Pop
      "OLD_SECONDARY": self.old_secondary,
2992 a8083063 Iustin Pop
      }
2993 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2994 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
2995 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
2996 a8083063 Iustin Pop
    return env, nl, nl
2997 a8083063 Iustin Pop
2998 a8083063 Iustin Pop
  def CheckPrereq(self):
2999 a8083063 Iustin Pop
    """Check prerequisites.
3000 a8083063 Iustin Pop

3001 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3002 a8083063 Iustin Pop

3003 a8083063 Iustin Pop
    """
3004 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3005 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3006 a8083063 Iustin Pop
    if instance is None:
3007 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3008 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3009 a8083063 Iustin Pop
    self.instance = instance
3010 a8083063 Iustin Pop
3011 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3012 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3013 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3014 a8083063 Iustin Pop
    for disk in instance.disks:
3015 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
3016 a8083063 Iustin Pop
        break
3017 a8083063 Iustin Pop
    else:
3018 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3019 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
3020 a8083063 Iustin Pop
    for child in disk.children:
3021 a8083063 Iustin Pop
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
3022 a8083063 Iustin Pop
        break
3023 a8083063 Iustin Pop
    else:
3024 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find the device with this port.")
3025 a8083063 Iustin Pop
3026 a8083063 Iustin Pop
    if len(disk.children) < 2:
3027 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot remove the last component from"
3028 3ecf6786 Iustin Pop
                                 " a mirror.")
3029 a8083063 Iustin Pop
    self.disk = disk
3030 a8083063 Iustin Pop
    self.child = child
3031 a8083063 Iustin Pop
    if self.child.logical_id[0] == instance.primary_node:
3032 a8083063 Iustin Pop
      oid = 1
3033 a8083063 Iustin Pop
    else:
3034 a8083063 Iustin Pop
      oid = 0
3035 a8083063 Iustin Pop
    self.old_secondary = self.child.logical_id[oid]
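    # Note: the checks above rely on a drbd child's logical_id having the
    # form (node_a, node_b, port) -- the two member nodes in positions 0/1
    # and the port (the op's disk_id) in position 2; old_secondary is the
    # member node that is not the primary.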
3036 a8083063 Iustin Pop
3037 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3038 a8083063 Iustin Pop
    """Remove the mirror component
3039 a8083063 Iustin Pop

3040 a8083063 Iustin Pop
    """
3041 a8083063 Iustin Pop
    instance = self.instance
3042 a8083063 Iustin Pop
    disk = self.disk
3043 a8083063 Iustin Pop
    child = self.child
3044 a8083063 Iustin Pop
    logger.Info("remove mirror component")
3045 a8083063 Iustin Pop
    self.cfg.SetDiskID(disk, instance.primary_node)
3046 a8083063 Iustin Pop
    if not rpc.call_blockdev_removechild(instance.primary_node,
3047 a8083063 Iustin Pop
                                         disk, child):
3048 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't remove child from mirror.")
3049 a8083063 Iustin Pop
3050 a8083063 Iustin Pop
    for node in child.logical_id[:2]:
3051 a8083063 Iustin Pop
      self.cfg.SetDiskID(child, node)
3052 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, child):
3053 a8083063 Iustin Pop
        logger.Error("Warning: failed to remove device from node %s,"
3054 a8083063 Iustin Pop
                     " continuing operation." % node)
3055 a8083063 Iustin Pop
3056 a8083063 Iustin Pop
    disk.children.remove(child)
3057 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3058 a8083063 Iustin Pop
3059 a8083063 Iustin Pop
3060 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3061 a8083063 Iustin Pop
  """Replace the disks of an instance.
3062 a8083063 Iustin Pop

3063 a8083063 Iustin Pop
  """
3064 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3065 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3066 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3067 a8083063 Iustin Pop
3068 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3069 a8083063 Iustin Pop
    """Build hooks env.
3070 a8083063 Iustin Pop

3071 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3072 a8083063 Iustin Pop

3073 a8083063 Iustin Pop
    """
3074 a8083063 Iustin Pop
    env = {
3075 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3076 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3077 a8083063 Iustin Pop
      }
3078 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3079 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3080 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3081 a8083063 Iustin Pop
    return env, nl, nl
3082 a8083063 Iustin Pop
3083 a8083063 Iustin Pop
  def CheckPrereq(self):
3084 a8083063 Iustin Pop
    """Check prerequisites.
3085 a8083063 Iustin Pop

3086 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3087 a8083063 Iustin Pop

3088 a8083063 Iustin Pop
    """
3089 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3090 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3091 a8083063 Iustin Pop
    if instance is None:
3092 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3093 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3094 a8083063 Iustin Pop
    self.instance = instance
3095 a8083063 Iustin Pop
3096 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3097 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3098 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3099 a8083063 Iustin Pop
3100 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3101 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3102 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3103 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3104 a8083063 Iustin Pop
3105 a8083063 Iustin Pop
    remote_node = getattr(self.op, "remote_node", None)
3106 a8083063 Iustin Pop
    if remote_node is None:
3107 a8083063 Iustin Pop
      remote_node = instance.secondary_nodes[0]
3108 a8083063 Iustin Pop
    else:
3109 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3110 a8083063 Iustin Pop
      if remote_node is None:
3111 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3112 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3113 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3114 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3115 3ecf6786 Iustin Pop
                                 " the instance.")
3116 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3117 a8083063 Iustin Pop
3118 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3119 a8083063 Iustin Pop
    """Replace the disks of an instance.
3120 a8083063 Iustin Pop

3121 a8083063 Iustin Pop
    """
3122 a8083063 Iustin Pop
    instance = self.instance
3123 a8083063 Iustin Pop
    iv_names = {}
3124 a8083063 Iustin Pop
    # start of work
3125 a8083063 Iustin Pop
    remote_node = self.op.remote_node
3126 a8083063 Iustin Pop
    cfg = self.cfg
3127 a8083063 Iustin Pop
    for dev in instance.disks:
3128 a8083063 Iustin Pop
      size = dev.size
3129 923b1523 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3130 923b1523 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3131 923b1523 Iustin Pop
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
3132 923b1523 Iustin Pop
                                       remote_node, size, names)
3133 a8083063 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
3134 a8083063 Iustin Pop
      logger.Info("adding new mirror component on secondary for %s" %
3135 a8083063 Iustin Pop
                  dev.iv_name)
3136 a8083063 Iustin Pop
      #HARDCODE
3137 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
3138 a0c3fea1 Michael Hanselmann
                                        _GetInstanceInfoText(instance)):
3139 3ecf6786 Iustin Pop
        raise errors.OpExecError("Failed to create new component on"
3140 3ecf6786 Iustin Pop
                                 " secondary node %s\n"
3141 3ecf6786 Iustin Pop
                                 "Full abort, cleanup manually!" %
3142 3ecf6786 Iustin Pop
                                 remote_node)
3143 a8083063 Iustin Pop
3144 a8083063 Iustin Pop
      logger.Info("adding new mirror component on primary")
3145 a8083063 Iustin Pop
      #HARDCODE
3146 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
3147 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3148 a8083063 Iustin Pop
        # remove secondary dev
3149 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3150 a8083063 Iustin Pop
        rpc.call_blockdev_remove(remote_node, new_drbd)
3151 a8083063 Iustin Pop
        raise errors.OpExecError("Failed to create volume on primary!\n"
3152 a8083063 Iustin Pop
                                 "Full abort, cleanup manually!!")
3153 a8083063 Iustin Pop
3154 a8083063 Iustin Pop
      # the device exists now
3155 a8083063 Iustin Pop
      # call the primary node to add the mirror to md
3156 a8083063 Iustin Pop
      logger.Info("adding new mirror component to md")
3157 a8083063 Iustin Pop
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
3158 880478f8 Iustin Pop
                                        new_drbd):
3159 a8083063 Iustin Pop
        logger.Error("Can't add mirror compoment to md!")
3160 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3161 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
3162 a8083063 Iustin Pop
          logger.Error("Can't rollback on secondary")
3163 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, instance.primary_node)
3164 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3165 a8083063 Iustin Pop
          logger.Error("Can't rollback on primary")
3166 3ecf6786 Iustin Pop
        raise errors.OpExecError("Full abort, cleanup manually!!")
3167 a8083063 Iustin Pop
3168 a8083063 Iustin Pop
      dev.children.append(new_drbd)
3169 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3170 a8083063 Iustin Pop
3171 a8083063 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3172 a8083063 Iustin Pop
    # does a combined result over all disks, so we don't check its
3173 a8083063 Iustin Pop
    # return value
3174 a8083063 Iustin Pop
    _WaitForSync(cfg, instance, unlock=True)
3175 a8083063 Iustin Pop
3176 a8083063 Iustin Pop
    # so check manually all the devices
3177 a8083063 Iustin Pop
    for name in iv_names:
3178 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3179 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3180 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3181 a8083063 Iustin Pop
      if is_degr:
3182 3ecf6786 Iustin Pop
        raise errors.OpExecError("MD device %s is degraded!" % name)
3183 a8083063 Iustin Pop
      cfg.SetDiskID(new_drbd, instance.primary_node)
3184 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
3185 a8083063 Iustin Pop
      if is_degr:
3186 3ecf6786 Iustin Pop
        raise errors.OpExecError("New drbd device %s is degraded!" % name)
3187 a8083063 Iustin Pop
3188 a8083063 Iustin Pop
    for name in iv_names:
3189 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3190 a8083063 Iustin Pop
      logger.Info("remove mirror %s component" % name)
3191 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3192 a8083063 Iustin Pop
      if not rpc.call_blockdev_removechild(instance.primary_node,
3193 a8083063 Iustin Pop
                                           dev, child):
3194 a8083063 Iustin Pop
        logger.Error("Can't remove child from mirror, aborting"
3195 a8083063 Iustin Pop
                     " *this device cleanup*.\nYou need to cleanup manually!!")
3196 a8083063 Iustin Pop
        continue
3197 a8083063 Iustin Pop
3198 a8083063 Iustin Pop
      for node in child.logical_id[:2]:
3199 a8083063 Iustin Pop
        logger.Info("remove child device on %s" % node)
3200 a8083063 Iustin Pop
        cfg.SetDiskID(child, node)
3201 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(node, child):
3202 a8083063 Iustin Pop
          logger.Error("Warning: failed to remove device from node %s,"
3203 a8083063 Iustin Pop
                       " continuing operation." % node)
3204 a8083063 Iustin Pop
3205 a8083063 Iustin Pop
      dev.children.remove(child)
3206 a8083063 Iustin Pop
3207 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3208 a8083063 Iustin Pop
3209 a8083063 Iustin Pop
3210 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3211 a8083063 Iustin Pop
  """Query runtime instance data.
3212 a8083063 Iustin Pop

3213 a8083063 Iustin Pop
  """
3214 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3215 a8083063 Iustin Pop
3216 a8083063 Iustin Pop
  def CheckPrereq(self):
3217 a8083063 Iustin Pop
    """Check prerequisites.
3218 a8083063 Iustin Pop

3219 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3220 a8083063 Iustin Pop

3221 a8083063 Iustin Pop
    """
3222 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3223 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3224 a8083063 Iustin Pop
    if self.op.instances:
3225 a8083063 Iustin Pop
      self.wanted_instances = []
3226 a8083063 Iustin Pop
      names = self.op.instances
3227 a8083063 Iustin Pop
      for name in names:
3228 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3229 a8083063 Iustin Pop
        if instance is None:
3230 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3231 a8083063 Iustin Pop
        self.wanted_instances.append(instance)
3232 a8083063 Iustin Pop
    else:
3233 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3234 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3235 a8083063 Iustin Pop
    return
3236 a8083063 Iustin Pop
3237 a8083063 Iustin Pop
3238 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3239 a8083063 Iustin Pop
    """Compute block device status.
3240 a8083063 Iustin Pop

3241 a8083063 Iustin Pop
    """
3242 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
3243 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3244 a8083063 Iustin Pop
    if dev.dev_type == "drbd":
3245 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
3246 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
3247 a8083063 Iustin Pop
        snode = dev.logical_id[1]
3248 a8083063 Iustin Pop
      else:
3249 a8083063 Iustin Pop
        snode = dev.logical_id[0]
3250 a8083063 Iustin Pop
3251 a8083063 Iustin Pop
    if snode:
3252 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
3253 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3254 a8083063 Iustin Pop
    else:
3255 a8083063 Iustin Pop
      dev_sstatus = None
3256 a8083063 Iustin Pop
3257 a8083063 Iustin Pop
    if dev.children:
3258 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3259 a8083063 Iustin Pop
                      for child in dev.children]
3260 a8083063 Iustin Pop
    else:
3261 a8083063 Iustin Pop
      dev_children = []
3262 a8083063 Iustin Pop
3263 a8083063 Iustin Pop
    data = {
3264 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
3265 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
3266 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
3267 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
3268 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
3269 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
3270 a8083063 Iustin Pop
      "children": dev_children,
3271 a8083063 Iustin Pop
      }
3272 a8083063 Iustin Pop
3273 a8083063 Iustin Pop
    return data
3274 a8083063 Iustin Pop
3275 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3276 a8083063 Iustin Pop
    """Gather and return data"""
3277 a8083063 Iustin Pop
    result = {}
3278 a8083063 Iustin Pop
    for instance in self.wanted_instances:
3279 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
3280 a8083063 Iustin Pop
                                           instance.name)
3281 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
3282 a8083063 Iustin Pop
        remote_state = "up"
3283 a8083063 Iustin Pop
      else:
3284 a8083063 Iustin Pop
        remote_state = "down"
3285 a8083063 Iustin Pop
      if instance.status == "down":
3286 a8083063 Iustin Pop
        config_state = "down"
3287 a8083063 Iustin Pop
      else:
3288 a8083063 Iustin Pop
        config_state = "up"
3289 a8083063 Iustin Pop
3290 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
3291 a8083063 Iustin Pop
               for device in instance.disks]
3292 a8083063 Iustin Pop
3293 a8083063 Iustin Pop
      idict = {
3294 a8083063 Iustin Pop
        "name": instance.name,
3295 a8083063 Iustin Pop
        "config_state": config_state,
3296 a8083063 Iustin Pop
        "run_state": remote_state,
3297 a8083063 Iustin Pop
        "pnode": instance.primary_node,
3298 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
3299 a8083063 Iustin Pop
        "os": instance.os,
3300 a8083063 Iustin Pop
        "memory": instance.memory,
3301 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
3302 a8083063 Iustin Pop
        "disks": disks,
3303 a8083063 Iustin Pop
        }
3304 a8083063 Iustin Pop
3305 a8083063 Iustin Pop
      result[instance.name] = idict
3306 a8083063 Iustin Pop
3307 a8083063 Iustin Pop
    return result
3308 a8083063 Iustin Pop
3309 a8083063 Iustin Pop
3310 a8083063 Iustin Pop
class LUQueryNodeData(NoHooksLU):
3311 a8083063 Iustin Pop
  """Logical unit for querying node data.
3312 a8083063 Iustin Pop

3313 a8083063 Iustin Pop
  """
3314 a8083063 Iustin Pop
  _OP_REQP = ["nodes"]
3315 a8083063 Iustin Pop
3316 a8083063 Iustin Pop
  def CheckPrereq(self):
3317 a8083063 Iustin Pop
    """Check prerequisites.
3318 a8083063 Iustin Pop

3319 a8083063 Iustin Pop
    This only checks the optional node list against the existing names.
3320 a8083063 Iustin Pop

3321 a8083063 Iustin Pop
    """
3322 dcb93971 Michael Hanselmann
    self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)
3323 a8083063 Iustin Pop
3324 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3325 a8083063 Iustin Pop
    """Compute and return the list of nodes.
3326 a8083063 Iustin Pop

3327 a8083063 Iustin Pop
    """
3328 a8083063 Iustin Pop
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
3329 a8083063 Iustin Pop
             in self.cfg.GetInstanceList()]
3330 a8083063 Iustin Pop
    result = []
3331 a8083063 Iustin Pop
    for node in self.wanted_nodes:
3332 a8083063 Iustin Pop
      result.append((node.name, node.primary_ip, node.secondary_ip,
3333 a8083063 Iustin Pop
                     [inst.name for inst in ilist
3334 a8083063 Iustin Pop
                      if inst.primary_node == node.name],
3335 a8083063 Iustin Pop
                     [inst.name for inst in ilist
3336 a8083063 Iustin Pop
                      if node.name in inst.secondary_nodes],
3337 a8083063 Iustin Pop
                     ))
3338 a8083063 Iustin Pop
    return result
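    # Illustrative sketch (host names and addresses are placeholders): each
    # tuple appended above has the form
    #   ("node1.example.com", "192.0.2.1", "198.51.100.1",
    #    ["inst1.example.com"],   # instances using the node as primary
    #    ["inst2.example.com"])   # instances using the node as secondary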
3339 a8083063 Iustin Pop
3340 a8083063 Iustin Pop
3341 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
3342 a8083063 Iustin Pop
  """Modifies an instances's parameters.
3343 a8083063 Iustin Pop

3344 a8083063 Iustin Pop
  """
3345 a8083063 Iustin Pop
  HPATH = "instance-modify"
3346 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3347 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3348 a8083063 Iustin Pop
3349 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3350 a8083063 Iustin Pop
    """Build hooks env.
3351 a8083063 Iustin Pop

3352 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
3353 a8083063 Iustin Pop

3354 a8083063 Iustin Pop
    """
3355 396e1b78 Michael Hanselmann
    args = dict()
3356 a8083063 Iustin Pop
    if self.mem:
3357 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
3358 a8083063 Iustin Pop
    if self.vcpus:
3359 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
3360 396e1b78 Michael Hanselmann
    if self.do_ip or self.do_bridge:
3361 396e1b78 Michael Hanselmann
      if self.do_ip:
3362 396e1b78 Michael Hanselmann
        ip = self.ip
3363 396e1b78 Michael Hanselmann
      else:
3364 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
3365 396e1b78 Michael Hanselmann
      if self.bridge:
3366 396e1b78 Michael Hanselmann
        bridge = self.bridge
3367 396e1b78 Michael Hanselmann
      else:
3368 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
3369 396e1b78 Michael Hanselmann
      args['nics'] = [(ip, bridge)]
3370 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
3371 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3372 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3373 a8083063 Iustin Pop
    return env, nl, nl
3374 a8083063 Iustin Pop
3375 a8083063 Iustin Pop
  def CheckPrereq(self):
3376 a8083063 Iustin Pop
    """Check prerequisites.
3377 a8083063 Iustin Pop

3378 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
3379 a8083063 Iustin Pop

3380 a8083063 Iustin Pop
    """
3381 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
3382 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
3383 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
3384 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
3385 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3386 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
3387 a8083063 Iustin Pop
    if self.mem is not None:
3388 a8083063 Iustin Pop
      try:
3389 a8083063 Iustin Pop
        self.mem = int(self.mem)
3390 a8083063 Iustin Pop
      except ValueError, err:
3391 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
3392 a8083063 Iustin Pop
    if self.vcpus is not None:
3393 a8083063 Iustin Pop
      try:
3394 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
3395 a8083063 Iustin Pop
      except ValueError, err:
3396 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
3397 a8083063 Iustin Pop
    if self.ip is not None:
3398 a8083063 Iustin Pop
      self.do_ip = True
3399 a8083063 Iustin Pop
      if self.ip.lower() == "none":
3400 a8083063 Iustin Pop
        self.ip = None
3401 a8083063 Iustin Pop
      else:
3402 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
3403 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
3404 a8083063 Iustin Pop
    else:
3405 a8083063 Iustin Pop
      self.do_ip = False
3406 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
3407 a8083063 Iustin Pop
3408 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3409 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3410 a8083063 Iustin Pop
    if instance is None:
3411 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
3412 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3413 a8083063 Iustin Pop
    self.op.instance_name = instance.name
3414 a8083063 Iustin Pop
    self.instance = instance
3415 a8083063 Iustin Pop
    return
3416 a8083063 Iustin Pop
3417 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3418 a8083063 Iustin Pop
    """Modifies an instance.
3419 a8083063 Iustin Pop

3420 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
3421 a8083063 Iustin Pop
    """
3422 a8083063 Iustin Pop
    result = []
3423 a8083063 Iustin Pop
    instance = self.instance
3424 a8083063 Iustin Pop
    if self.mem:
3425 a8083063 Iustin Pop
      instance.memory = self.mem
3426 a8083063 Iustin Pop
      result.append(("mem", self.mem))
3427 a8083063 Iustin Pop
    if self.vcpus:
3428 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
3429 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
3430 a8083063 Iustin Pop
    if self.do_ip:
3431 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
3432 a8083063 Iustin Pop
      result.append(("ip", self.ip))
3433 a8083063 Iustin Pop
    if self.bridge:
3434 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
3435 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
3436 a8083063 Iustin Pop
3437 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3438 a8083063 Iustin Pop
3439 a8083063 Iustin Pop
    return result
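    # Illustrative sketch (values are placeholders): the list returned above
    # pairs each changed parameter with its new value, e.g.
    #   [("mem", 512), ("vcpus", 2), ("ip", "192.0.2.10"), ("bridge", "br0")]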
3440 a8083063 Iustin Pop
3441 a8083063 Iustin Pop
3442 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
3443 a8083063 Iustin Pop
  """Query the exports list
3444 a8083063 Iustin Pop

3445 a8083063 Iustin Pop
  """
3446 a8083063 Iustin Pop
  _OP_REQP = []
3447 a8083063 Iustin Pop
3448 a8083063 Iustin Pop
  def CheckPrereq(self):
3449 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
3450 a8083063 Iustin Pop

3451 a8083063 Iustin Pop
    """
3452 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
3453 a8083063 Iustin Pop
3454 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3455 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
3456 a8083063 Iustin Pop

3457 a8083063 Iustin Pop
    Returns:
3458 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
3459 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
3460 a8083063 Iustin Pop
      that node.
3461 a8083063 Iustin Pop

3462 a8083063 Iustin Pop
    """
3463 dcb93971 Michael Hanselmann
    return rpc.call_export_list([node.name for node in self.nodes])
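    # Illustrative sketch (placeholder names): the RPC result maps node names
    # to the exports found on them, e.g.
    #   {"node1.example.com": ["inst1.example.com", "inst2.example.com"]}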
3464 a8083063 Iustin Pop
3465 a8083063 Iustin Pop
3466 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
3467 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
3468 a8083063 Iustin Pop

3469 a8083063 Iustin Pop
  """
3470 a8083063 Iustin Pop
  HPATH = "instance-export"
3471 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3472 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3473 a8083063 Iustin Pop
3474 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3475 a8083063 Iustin Pop
    """Build hooks env.
3476 a8083063 Iustin Pop

3477 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
3478 a8083063 Iustin Pop

3479 a8083063 Iustin Pop
    """
3480 a8083063 Iustin Pop
    env = {
3481 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
3482 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
3483 a8083063 Iustin Pop
      }
3484 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3485 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3486 a8083063 Iustin Pop
          self.op.target_node]
3487 a8083063 Iustin Pop
    return env, nl, nl
3488 a8083063 Iustin Pop
3489 a8083063 Iustin Pop
  def CheckPrereq(self):
3490 a8083063 Iustin Pop
    """Check prerequisites.
3491 a8083063 Iustin Pop

3492 a8083063 Iustin Pop
    This checks that the instance name is a valid one.
3493 a8083063 Iustin Pop

3494 a8083063 Iustin Pop
    """
3495 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
3496 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
3497 a8083063 Iustin Pop
    if self.instance is None:
3498 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
3499 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3500 a8083063 Iustin Pop
3501 a8083063 Iustin Pop
    # node verification
3502 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
3503 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
3504 a8083063 Iustin Pop
3505 a8083063 Iustin Pop
    if self.dst_node is None:
3506 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
3507 3ecf6786 Iustin Pop
                                 self.op.target_node)
3508 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
3509 a8083063 Iustin Pop
3510 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3511 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
3512 a8083063 Iustin Pop

3513 a8083063 Iustin Pop
    """
3514 a8083063 Iustin Pop
    instance = self.instance
3515 a8083063 Iustin Pop
    dst_node = self.dst_node
3516 a8083063 Iustin Pop
    src_node = instance.primary_node
3517 a8083063 Iustin Pop
    # shutdown the instance, unless requested not to do so
3518 a8083063 Iustin Pop
    if self.op.shutdown:
3519 a8083063 Iustin Pop
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
3520 a8083063 Iustin Pop
      self.processor.ChainOpCode(op, feedback_fn)
3521 a8083063 Iustin Pop
3522 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
3523 a8083063 Iustin Pop
3524 a8083063 Iustin Pop
    snap_disks = []
3525 a8083063 Iustin Pop
3526 a8083063 Iustin Pop
    try:
3527 a8083063 Iustin Pop
      for disk in instance.disks:
3528 a8083063 Iustin Pop
        if disk.iv_name == "sda":
3529 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
3530 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
3531 a8083063 Iustin Pop
3532 a8083063 Iustin Pop
          if not new_dev_name:
3533 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
3534 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
3535 a8083063 Iustin Pop
          else:
3536 a8083063 Iustin Pop
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
3537 a8083063 Iustin Pop
                                   logical_id=(vgname, new_dev_name),
3538 a8083063 Iustin Pop
                                   physical_id=(vgname, new_dev_name),
3539 a8083063 Iustin Pop
                                   iv_name=disk.iv_name)
3540 a8083063 Iustin Pop
            snap_disks.append(new_dev)
3541 a8083063 Iustin Pop
3542 a8083063 Iustin Pop
    finally:
3543 a8083063 Iustin Pop
      if self.op.shutdown:
3544 a8083063 Iustin Pop
        op = opcodes.OpStartupInstance(instance_name=instance.name,
3545 a8083063 Iustin Pop
                                       force=False)
3546 a8083063 Iustin Pop
        self.processor.ChainOpCode(op, feedback_fn)
3547 a8083063 Iustin Pop
3548 a8083063 Iustin Pop
    # TODO: check for size
3549 a8083063 Iustin Pop
3550 a8083063 Iustin Pop
    for dev in snap_disks:
3551 a8083063 Iustin Pop
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
3552 a8083063 Iustin Pop
                                      instance):
3553 a8083063 Iustin Pop
        logger.Error("could not export block device %s from node"
3554 a8083063 Iustin Pop
                     " %s to node %s" %
3555 a8083063 Iustin Pop
                     (dev.logical_id[1], src_node, dst_node.name))
3556 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
3557 a8083063 Iustin Pop
        logger.Error("could not remove snapshot block device %s from"
3558 a8083063 Iustin Pop
                     " node %s" % (dev.logical_id[1], src_node))
3559 a8083063 Iustin Pop
3560 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
3561 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
3562 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
3563 a8083063 Iustin Pop
3564 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
3565 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
3566 a8083063 Iustin Pop
3567 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
3568 a8083063 Iustin Pop
    # if we proceed, the backup would be removed because OpQueryExports
3569 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
3570 a8083063 Iustin Pop
    if nodelist:
3571 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
3572 a8083063 Iustin Pop
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
3573 a8083063 Iustin Pop
      for node in exportlist:
3574 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
3575 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
3576 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
3577 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
3578 5c947f38 Iustin Pop
3579 5c947f38 Iustin Pop
3580 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
3581 5c947f38 Iustin Pop
  """Generic tags LU.
3582 5c947f38 Iustin Pop

3583 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
3584 5c947f38 Iustin Pop

3585 5c947f38 Iustin Pop
  """
3586 5c947f38 Iustin Pop
  def CheckPrereq(self):
3587 5c947f38 Iustin Pop
    """Check prerequisites.
3588 5c947f38 Iustin Pop

3589 5c947f38 Iustin Pop
    """
3590 5c947f38 Iustin Pop
    if self.op.kind == constants.TAG_CLUSTER:
3591 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
3592 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
3593 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
3594 5c947f38 Iustin Pop
      if name is None:
3595 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
3596 3ecf6786 Iustin Pop
                                   (self.op.name,))
3597 5c947f38 Iustin Pop
      self.op.name = name
3598 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
3599 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
3600 5c947f38 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
3601 5c947f38 Iustin Pop
      if name is None:
3602 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
3603 3ecf6786 Iustin Pop
                                   (self.op.name,))
3604 5c947f38 Iustin Pop
      self.op.name = name
3605 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
3606 5c947f38 Iustin Pop
    else:
3607 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
3608 3ecf6786 Iustin Pop
                                 str(self.op.kind))
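    # Illustrative examples (names are placeholders):
    #   kind=constants.TAG_CLUSTER (name unused)           -> target is the cluster
    #   kind=constants.TAG_NODE, name="node1.example.com"  -> target is that node
    #   kind=constants.TAG_INSTANCE, name="inst1.example.com" -> target is that
    #   instance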
3609 5c947f38 Iustin Pop
3610 5c947f38 Iustin Pop
3611 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
3612 5c947f38 Iustin Pop
  """Returns the tags of a given object.
3613 5c947f38 Iustin Pop

3614 5c947f38 Iustin Pop
  """
3615 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
3616 5c947f38 Iustin Pop
3617 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3618 5c947f38 Iustin Pop
    """Returns the tag list.
3619 5c947f38 Iustin Pop

3620 5c947f38 Iustin Pop
    """
3621 5c947f38 Iustin Pop
    return self.target.GetTags()
3622 5c947f38 Iustin Pop
3623 5c947f38 Iustin Pop
3624 5c947f38 Iustin Pop
class LUAddTag(TagsLU):
3625 5c947f38 Iustin Pop
  """Sets a tag on a given object.
3626 5c947f38 Iustin Pop

3627 5c947f38 Iustin Pop
  """
3628 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name", "tag"]
3629 5c947f38 Iustin Pop
3630 5c947f38 Iustin Pop
  def CheckPrereq(self):
3631 5c947f38 Iustin Pop
    """Check prerequisites.
3632 5c947f38 Iustin Pop

3633 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
3634 5c947f38 Iustin Pop

3635 5c947f38 Iustin Pop
    """
3636 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3637 5c947f38 Iustin Pop
    objects.TaggableObject.ValidateTag(self.op.tag)
3638 5c947f38 Iustin Pop
3639 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3640 5c947f38 Iustin Pop
    """Sets the tag.
3641 5c947f38 Iustin Pop

3642 5c947f38 Iustin Pop
    """
3643 5c947f38 Iustin Pop
    try:
3644 5c947f38 Iustin Pop
      self.target.AddTag(self.op.tag)
3645 5c947f38 Iustin Pop
    except errors.TagError, err:
3646 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
3647 5c947f38 Iustin Pop
    try:
3648 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3649 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3650 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3651 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3652 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
3653 5c947f38 Iustin Pop
3654 5c947f38 Iustin Pop
3655 5c947f38 Iustin Pop
class LUDelTag(TagsLU):
3656 5c947f38 Iustin Pop
  """Delete a tag from a given object.
3657 5c947f38 Iustin Pop

3658 5c947f38 Iustin Pop
  """
3659 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name", "tag"]
3660 5c947f38 Iustin Pop
3661 5c947f38 Iustin Pop
  def CheckPrereq(self):
3662 5c947f38 Iustin Pop
    """Check prerequisites.
3663 5c947f38 Iustin Pop

3664 5c947f38 Iustin Pop
    This checks that we have the given tag.
3665 5c947f38 Iustin Pop

3666 5c947f38 Iustin Pop
    """
3667 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3668 5c947f38 Iustin Pop
    objects.TaggableObject.ValidateTag(self.op.tag)
3669 5c947f38 Iustin Pop
    if self.op.tag not in self.target.GetTags():
3670 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Tag not found")
3671 5c947f38 Iustin Pop
3672 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3673 5c947f38 Iustin Pop
    """Remove the tag from the object.
3674 5c947f38 Iustin Pop

3675 5c947f38 Iustin Pop
    """
3676 5c947f38 Iustin Pop
    self.target.RemoveTag(self.op.tag)
3677 5c947f38 Iustin Pop
    try:
3678 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3679 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3680 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3681 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3682 3ecf6786 Iustin Pop
                                " aborted. Please retry.")