Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 83120a01

History | View | Annotate | Download (109.5 kB)

1 a8083063 Iustin Pop
#!/usr/bin/python
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import socket
30 a8083063 Iustin Pop
import time
31 a8083063 Iustin Pop
import tempfile
32 a8083063 Iustin Pop
import re
33 a8083063 Iustin Pop
import platform
34 a8083063 Iustin Pop
35 a8083063 Iustin Pop
from ganeti import rpc
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 a8083063 Iustin Pop
from ganeti import config
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 a8083063 Iustin Pop
from ganeti import ssconf
46 a8083063 Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Args:
      processor: the processor driving this LU's execution
      op: the opcode instance this LU processes
      cfg: the cluster configuration accessor
      sstore: the cluster simple store (ssconf.SimpleStore)

    Raises:
      errors.OpPrereqError: if a parameter listed in _OP_REQP is
        missing from the opcode, if the cluster is not initialized
        (when REQ_CLUSTER), or if run on a non-master node (when
        REQ_MASTER)

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        # use the call form of raise; the old "raise E, args" comma
        # syntax is deprecated and invalid in Python 3
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        # master-only LUs must run on the node the sstore names master
        if master != socket.gethostname():
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in the
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
148 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    # nothing to build for a hook-less LU
    return None
165 a8083063 Iustin Pop
166 a8083063 Iustin Pop
167 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded nodes.

  Args:
    lu: the logical unit on whose behalf the nodes are checked
    nodes: List of nodes (strings) or None for all

  Returns:
    A list of node objects (as returned by lu.cfg.GetNodeInfo), one
    per requested node, or one per cluster node if nodes is empty/None.

  Raises:
    errors.OpPrereqError: if nodes is not a list or contains an
      unknown node name

  """
  if nodes is not None and not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if nodes:
    wanted_nodes = []

    for name in nodes:
      node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
      if node is None:
        raise errors.OpPrereqError("No such node name '%s'" % name)
      # BUGFIX: the append must be inside the loop; it was dedented,
      # so only the last requested node ended up in the result
      wanted_nodes.append(node)

    return wanted_nodes
  else:
    return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]
189 dcb93971 Michael Hanselmann
190 dcb93971 Michael Hanselmann
191 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields
    selected: The fields requested by the caller, to be validated

  Raises:
    errors.OpPrereqError: if any selected field is neither static nor
      dynamic

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    # use the call form of raise; the old "raise E, args" comma
    # syntax is deprecated and invalid in Python 3
    raise errors.OpPrereqError("Unknown output fields selected: %s"
                               % ",".join(frozenset(selected).
                                          difference(all_fields)))
208 dcb93971 Michael Hanselmann
209 dcb93971 Michael Hanselmann
210 a8083063 Iustin Pop
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Scans /etc/hosts for a line carrying all of (ip, fqdn, short name):
  such a line is kept, lines carrying only some of the three are
  dropped, and a fresh entry is appended when no complete one exists.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  # short host name: first component of the fqdn
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False  # set when a complete, matching entry is found

  save_lines = []  # lines to keep verbatim
  add_lines = []   # new lines to append
  removed = False  # set when a partial/stale entry was dropped

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    # parse without the trailing newline; rawline keeps it for rewriting
    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    # haveall: line carries ip AND fqdn AND short name;
    # havesome: line carries at least one of them
    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    # mkstemp(suffix, prefix, dir): temp file is /etc/hosts_XXXXXXtmp.
    # NOTE(review): mkstemp creates the file with mode 0600, so after
    # the rename /etc/hosts is no longer world-readable -- confirm
    # this is intended.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
288 a8083063 Iustin Pop
289 a8083063 Iustin Pop
290 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Keeps any /etc/ssh/ssh_known_hosts line that lists both the fqdn
  and the ip with the expected public key, drops lines that match
  only partially or carry a different key, and appends a fresh entry
  when no complete one exists.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # open read-write, creating the file if it doesn't exist yet
  if os.path.exists('/etc/ssh/ssh_known_hosts'):
    f = open('/etc/ssh/ssh_known_hosts', 'r+')
  else:
    f = open('/etc/ssh/ssh_known_hosts', 'w+')

  inthere = False  # set when a complete, matching entry is found

  save_lines = []  # lines to keep verbatim
  add_lines = []   # new lines to append
  removed = False  # set when a partial/stale entry was dropped

  while True:
    rawline = f.readline()
    logger.Debug('read %s' % (repr(rawline),))

    if not rawline:
      # End of file
      break

    # parse without the trailing newline; rawline keeps it for rewriting
    line = rawline.split('\n')[0]

    # known_hosts format: "host1,host2 keytype key ..."
    # NOTE(review): parts[2] assumes every line has at least three
    # space-separated fields; a malformed or blank-ish line would
    # raise IndexError here -- confirm the file is always well-formed.
    parts = line.split(' ')
    fields = parts[0].split(',')
    key = parts[2]

    # haveall: line lists both ip and fqdn; havesome: at least one
    haveall = True
    havesome = False
    for spec in [ ip, fullnode ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
    if haveall and key == pubkey:
      inthere = True
      save_lines.append(rawline)
      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
      continue

    if havesome and (not haveall or key != pubkey):
      # partial match or wrong key: stale entry, drop it
      removed = True
      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # mkstemp(suffix, prefix, dir); temp file created in /etc/ssh
    fd, tmpname = tempfile.mkstemp('tmp', 'ssh_known_hosts_', '/etc/ssh')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, '/etc/ssh/ssh_known_hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
368 a8083063 Iustin Pop
369 a8083063 Iustin Pop
370 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  Args:
    vglist: dict mapping volume group names to their size
    vgname: the name of the volume group to check for

  Returns:
    An error message (str) if the volume group is missing or smaller
    than 20480 MiB, None otherwise.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    # fixed unit typo in the message: "Mib" -> "MiB"
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None
384 a8083063 Iustin Pop
385 a8083063 Iustin Pop
386 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  Raises:
    errors.OpExecError: if the ssh-keygen invocation fails

  """
  utils.RemoveFile('/root/.ssh/known_hosts')

  # back up any existing keypair before it is replaced below
  if os.path.exists('/root/.ssh/id_dsa'):
    utils.CreateBackup('/root/.ssh/id_dsa')
  if os.path.exists('/root/.ssh/id_dsa.pub'):
    utils.CreateBackup('/root/.ssh/id_dsa.pub')

  utils.RemoveFile('/root/.ssh/id_dsa')
  utils.RemoveFile('/root/.ssh/id_dsa.pub')

  # generate a fresh passphrase-less dsa keypair for root
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", "/root/.ssh/id_dsa",
                         "-q", "-N", ""])
  if result.failed:
    # use the call form of raise; the old "raise E, args" comma
    # syntax is deprecated and invalid in Python 3
    raise errors.OpExecError("could not generate ssh keypair, error %s" %
                             result.output)

  # authorize the new public key for root logins
  f = open('/root/.ssh/id_dsa.pub', 'r')
  try:
    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
  finally:
    f.close()
419 a8083063 Iustin Pop
420 a8083063 Iustin Pop
421 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the simple store (ssconf.SimpleStore) in which the generated
      node password is saved

  Raises:
    errors.OpExecError: if certificate generation or the node daemon
      restart fails

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # self-signed certificate valid for five years; private key and
  # certificate are written into the same file
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError, ("could not generate server ssl cert, command"
                               " %s had exitcode %s and error message %s" %
                               (result.cmd, result.exit_code, result.output))

  # the file contains the private key: make it owner-read-only
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # restart the node daemon so it picks up the new password/certificate
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError, ("could not start the node daemon, command %s"
                               " had exitcode %s and error %s" %
                               (result.cmd, result.exit_code, result.output))
450 a8083063 Iustin Pop
451 a8083063 Iustin Pop
452 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  This LU runs without an existing cluster (REQ_CLUSTER = False): it
  sets up the simple store, the node daemon password/certificate, the
  master IP, ssh and /etc/hosts entries, and writes the initial
  cluster configuration file.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """

    env = {"CLUSTER": self.op.cluster_name,
           "MASTER": self.hostname['hostname_full']}
    return env, [], [self.hostname['hostname_full']]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Resolves our own hostname and the cluster name, verifies the
    given IPs belong to this host, and validates the volume group,
    mac prefix, hypervisor type and master netdev parameters.
    Raises errors.OpPrereqError on any failure.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError, ("Cluster is already initialised")

    hostname_local = socket.gethostname()
    # hostname is a dict-like lookup result (keys: 'hostname',
    # 'hostname_full', 'ip'), saved for BuildHooksEnv/Exec
    self.hostname = hostname = utils.LookupHostname(hostname_local)
    if not hostname:
      raise errors.OpPrereqError, ("Cannot resolve my own hostname ('%s')" %
                                   hostname_local)

    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
    if not clustername:
      raise errors.OpPrereqError, ("Cannot resolve given cluster name ('%s')"
                                   % self.op.cluster_name)

    # fping -S127.0.0.1: verify the resolved IP is local to this host
    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
    if result.failed:
      raise errors.OpPrereqError, ("Inconsistency: this host's name resolves"
                                   " to %s,\nbut this ip address does not"
                                   " belong to this host."
                                   " Aborting." % hostname['ip'])

    # secondary_ip is optional; when given it must be valid and local
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError, ("Invalid secondary ip given")
    if secondary_ip and secondary_ip != hostname['ip']:
      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
      if result.failed:
        raise errors.OpPrereqError, ("You gave %s as secondary IP,\n"
                                     "but it does not belong to this host." %
                                     secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError, ("Error: %s" % vgstatus)

    # mac prefix must look like "aa:00:00" (three hex pairs)
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError, ("Invalid mac prefix given '%s'" %
                                   self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" %
                                   self.op.hypervisor_type)

    # the master netdev must exist on this host
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError, ("Invalid master netdev given (%s): '%s'" %
                                   (self.op.master_netdev, result.output))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
    ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname['hostname_full'])

    # set up ssh config and /etc/hosts
    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # second field of the pub key file is the base64 key material
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname['hostname_full'],
                    hostname['ip'],
                    )

    _UpdateKnownHosts(hostname['hostname_full'],
                      hostname['ip'],
                      sshkey,
                      )

    _InitSSHSetup(hostname['hostname'])

    # init of cluster config file
    cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
                    clustername['hostname'], sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
574 a8083063 Iustin Pop
575 a8083063 Iustin Pop
576 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    # the cluster may only be destroyed when the master is the sole
    # remaining node
    nodelist = self.cfg.GetNodeList()
    if len(nodelist) > 0 and nodelist != [master]:
      # use the call form of raise; the old "raise E, args" comma
      # syntax is deprecated and invalid in Python 3
      raise errors.OpPrereqError("There are still %d node(s) in "
                                 "this cluster." % (len(nodelist) - 1))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    # keep copies of the root keypair around before the node is torn down
    utils.CreateBackup('/root/.ssh/id_dsa')
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
604 a8083063 Iustin Pop
605 a8083063 Iustin Pop
606 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  All the _Verify* helpers follow the same convention: they return
  True if any check failed, False otherwise, so that Exec can simply
  accumulate the results with 'bad = bad or result'.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    Returns:
      True if any check failed, False otherwise

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a distinct name here, so we don't clobber the 'node'
        # parameter of this method with the loop variable
        for remote_node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (remote_node, node_result['nodelist'][remote_node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, and that the instance is running
    exactly on its primary node (if not marked down).

    Returns:
      True if any check failed, False otherwise

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    # return 'bad', not 'not bad': Exec accumulates this result with
    # 'bad = bad or result', like for all the other _Verify* helpers;
    # returning 'not bad' flagged perfectly healthy instances as broken
    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any check failed, False otherwise

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any check failed, False otherwise

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def _VerifyNodeConfigFiles(self, ismaster, node, file_list, feedback_fn):
    """Verify the list of node config files.

    Master-only config files must exist exactly on the master node;
    generic node config files must exist on every node.

    Returns:
      True if any check failed, False otherwise

    """
    bad = False
    for file_name in constants.MASTER_CONFIGFILES:
      if ismaster and file_name not in file_list:
        feedback_fn("  - ERROR: master config file %s missing from master"
                    " node %s" % (file_name, node))
        bad = True
      elif not ismaster and file_name in file_list:
        feedback_fn("  - ERROR: master config file %s should not exist"
                    " on non-master node %s" % (file_name, node))
        bad = True

    for file_name in constants.NODE_CONFIGFILES:
      if file_name not in file_list:
        feedback_fn("  - ERROR: config file %s missing from node %s" %
                    (file_name, node))
        bad = True

    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns:
      0 if the cluster verified successfully, 1 otherwise (usable
      directly as an exit code)

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.sstore.GetMasterNode()
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = constants.CLUSTER_CONF_FILES
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all the per-node data in one RPC burst up front
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_configfile = rpc.call_configfile_list(nodelist)
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result
      # node_configfile
      nodeconfigfile = all_configfile[node]

      if not nodeconfigfile:
        feedback_fn("  - ERROR: connection to %s failed" % (node))
        bad = True
        continue

      bad = bad or self._VerifyNodeConfigFiles(node==master, node,
                                               nodeconfigfile, feedback_fn)

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      # accumulate the LVs every instance should have, for the orphan
      # volume check below
      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
879 a8083063 Iustin Pop
880 a8083063 Iustin Pop
881 a8083063 Iustin Pop
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: the configuration writer, used to set the disk IDs
    instance: the instance object whose disks are polled
    oneshot: if True, only poll once and report, don't wait for sync
    unlock: if True, release the 'cmd' lock while sleeping between polls

  Returns:
    True if no disk reported itself as degraded, False otherwise

  Raises:
    errors.RemoteError: if the primary node cannot be contacted for
      mirror data after repeated retries

  """
  if not instance.disks:
    # nothing to sync
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  # attach the config-level disk IDs so the remote node can find the devices
  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      # RPC failure; retry a bounded number of times before giving up
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError, ("Can't contact node %s for mirror data,"
                                   " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # each mirror status is (percent_done, estimated_time, is_degraded);
      # percent_done is None once the device is no longer resyncing
      perc_done, est_time, is_degraded = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # sleep until the next poll; optionally drop the 'cmd' lock while
    # sleeping so other commands can run, re-acquiring it afterwards
    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
942 a8083063 Iustin Pop
943 a8083063 Iustin Pop
944 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
  """Check that mirrors are not degraded.

  Recursively checks the given device and all its children on the
  given node; returns True only if every reachable mirror reports
  itself as non-degraded.

  """
  cfgw.SetDiskID(dev, node)

  healthy = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if rstats:
      # field 5 of the remote status tuple is the 'degraded' flag
      healthy = healthy and (not rstats[5])
    else:
      logger.ToStderr("Can't get any data from node %s" % node)
      healthy = False

  # recurse into the children; the short-circuiting 'and' means we stop
  # descending as soon as a degraded device has been found
  for child in (dev.children or []):
    healthy = healthy and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return healthy
964 a8083063 Iustin Pop
965 a8083063 Iustin Pop
966 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
967 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
968 a8083063 Iustin Pop

969 a8083063 Iustin Pop
  """
970 a8083063 Iustin Pop
  _OP_REQP = []
971 a8083063 Iustin Pop
972 a8083063 Iustin Pop
  def CheckPrereq(self):
973 a8083063 Iustin Pop
    """Check prerequisites.
974 a8083063 Iustin Pop

975 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
976 a8083063 Iustin Pop

977 a8083063 Iustin Pop
    """
978 a8083063 Iustin Pop
    return
979 a8083063 Iustin Pop
980 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
981 a8083063 Iustin Pop
    """Compute the list of OSes.
982 a8083063 Iustin Pop

983 a8083063 Iustin Pop
    """
984 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
985 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
986 a8083063 Iustin Pop
    if node_data == False:
987 a8083063 Iustin Pop
      raise errors.OpExecError, "Can't gather the list of OSes"
988 a8083063 Iustin Pop
    return node_data
989 a8083063 Iustin Pop
990 a8083063 Iustin Pop
991 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
992 a8083063 Iustin Pop
  """Logical unit for removing a node.
993 a8083063 Iustin Pop

994 a8083063 Iustin Pop
  """
995 a8083063 Iustin Pop
  HPATH = "node-remove"
996 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
997 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
998 a8083063 Iustin Pop
999 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1000 a8083063 Iustin Pop
    """Build hooks env.
1001 a8083063 Iustin Pop

1002 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
1003 a8083063 Iustin Pop
    node would not allows itself to run.
1004 a8083063 Iustin Pop

1005 a8083063 Iustin Pop
    """
1006 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
1007 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
1008 a8083063 Iustin Pop
    return {"NODE_NAME": self.op.node_name}, all_nodes, all_nodes
1009 a8083063 Iustin Pop
1010 a8083063 Iustin Pop
  def CheckPrereq(self):
1011 a8083063 Iustin Pop
    """Check prerequisites.
1012 a8083063 Iustin Pop

1013 a8083063 Iustin Pop
    This checks:
1014 a8083063 Iustin Pop
     - the node exists in the configuration
1015 a8083063 Iustin Pop
     - it does not have primary or secondary instances
1016 a8083063 Iustin Pop
     - it's not the master
1017 a8083063 Iustin Pop

1018 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1019 a8083063 Iustin Pop

1020 a8083063 Iustin Pop
    """
1021 a8083063 Iustin Pop
1022 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
1023 a8083063 Iustin Pop
    if node is None:
1024 a8083063 Iustin Pop
      logger.Error("Error: Node '%s' is unknown." % self.op.node_name)
1025 a8083063 Iustin Pop
      return 1
1026 a8083063 Iustin Pop
1027 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
1028 a8083063 Iustin Pop
1029 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
1030 a8083063 Iustin Pop
    if node.name == masternode:
1031 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Node is the master node,"
1032 a8083063 Iustin Pop
                                   " you need to failover first.")
1033 a8083063 Iustin Pop
1034 a8083063 Iustin Pop
    for instance_name in instance_list:
1035 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1036 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1037 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Instance %s still running on the node,"
1038 a8083063 Iustin Pop
                                     " please remove first." % instance_name)
1039 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1040 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Instance %s has node as a secondary,"
1041 a8083063 Iustin Pop
                                     " please remove first." % instance_name)
1042 a8083063 Iustin Pop
    self.op.node_name = node.name
1043 a8083063 Iustin Pop
    self.node = node
1044 a8083063 Iustin Pop
1045 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1046 a8083063 Iustin Pop
    """Removes the node from the cluster.
1047 a8083063 Iustin Pop

1048 a8083063 Iustin Pop
    """
1049 a8083063 Iustin Pop
    node = self.node
1050 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1051 a8083063 Iustin Pop
                node.name)
1052 a8083063 Iustin Pop
1053 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1054 a8083063 Iustin Pop
1055 a8083063 Iustin Pop
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1056 a8083063 Iustin Pop
1057 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1058 a8083063 Iustin Pop
1059 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1060 a8083063 Iustin Pop
1061 a8083063 Iustin Pop
1062 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
1063 a8083063 Iustin Pop
  """Logical unit for querying nodes.
1064 a8083063 Iustin Pop

1065 a8083063 Iustin Pop
  """
1066 a8083063 Iustin Pop
  _OP_REQP = ["output_fields"]
1067 a8083063 Iustin Pop
1068 a8083063 Iustin Pop
  def CheckPrereq(self):
1069 a8083063 Iustin Pop
    """Check prerequisites.
1070 a8083063 Iustin Pop

1071 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
1072 a8083063 Iustin Pop

1073 a8083063 Iustin Pop
    """
1074 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["dtotal", "dfree",
1075 a8083063 Iustin Pop
                                     "mtotal", "mnode", "mfree"])
1076 a8083063 Iustin Pop
1077 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
1078 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
1079 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1080 a8083063 Iustin Pop
1081 a8083063 Iustin Pop
1082 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1083 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
1084 a8083063 Iustin Pop

1085 a8083063 Iustin Pop
    """
1086 a8083063 Iustin Pop
    nodenames = utils.NiceSort(self.cfg.GetNodeList())
1087 a8083063 Iustin Pop
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]
1088 a8083063 Iustin Pop
1089 a8083063 Iustin Pop
1090 a8083063 Iustin Pop
    # begin data gathering
1091 a8083063 Iustin Pop
1092 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
1093 a8083063 Iustin Pop
      live_data = {}
1094 a8083063 Iustin Pop
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
1095 a8083063 Iustin Pop
      for name in nodenames:
1096 a8083063 Iustin Pop
        nodeinfo = node_data.get(name, None)
1097 a8083063 Iustin Pop
        if nodeinfo:
1098 a8083063 Iustin Pop
          live_data[name] = {
1099 a8083063 Iustin Pop
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
1100 a8083063 Iustin Pop
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
1101 a8083063 Iustin Pop
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
1102 a8083063 Iustin Pop
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
1103 a8083063 Iustin Pop
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
1104 a8083063 Iustin Pop
            }
1105 a8083063 Iustin Pop
        else:
1106 a8083063 Iustin Pop
          live_data[name] = {}
1107 a8083063 Iustin Pop
    else:
1108 a8083063 Iustin Pop
      live_data = dict.fromkeys(nodenames, {})
1109 a8083063 Iustin Pop
1110 a8083063 Iustin Pop
    node_to_primary = dict.fromkeys(nodenames, 0)
1111 a8083063 Iustin Pop
    node_to_secondary = dict.fromkeys(nodenames, 0)
1112 a8083063 Iustin Pop
1113 a8083063 Iustin Pop
    if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields:
1114 a8083063 Iustin Pop
      instancelist = self.cfg.GetInstanceList()
1115 a8083063 Iustin Pop
1116 a8083063 Iustin Pop
      for instance in instancelist:
1117 a8083063 Iustin Pop
        instanceinfo = self.cfg.GetInstanceInfo(instance)
1118 a8083063 Iustin Pop
        node_to_primary[instanceinfo.primary_node] += 1
1119 a8083063 Iustin Pop
        for secnode in instanceinfo.secondary_nodes:
1120 a8083063 Iustin Pop
          node_to_secondary[secnode] += 1
1121 a8083063 Iustin Pop
1122 a8083063 Iustin Pop
    # end data gathering
1123 a8083063 Iustin Pop
1124 a8083063 Iustin Pop
    output = []
1125 a8083063 Iustin Pop
    for node in nodelist:
1126 a8083063 Iustin Pop
      node_output = []
1127 a8083063 Iustin Pop
      for field in self.op.output_fields:
1128 a8083063 Iustin Pop
        if field == "name":
1129 a8083063 Iustin Pop
          val = node.name
1130 a8083063 Iustin Pop
        elif field == "pinst":
1131 a8083063 Iustin Pop
          val = node_to_primary[node.name]
1132 a8083063 Iustin Pop
        elif field == "sinst":
1133 a8083063 Iustin Pop
          val = node_to_secondary[node.name]
1134 a8083063 Iustin Pop
        elif field == "pip":
1135 a8083063 Iustin Pop
          val = node.primary_ip
1136 a8083063 Iustin Pop
        elif field == "sip":
1137 a8083063 Iustin Pop
          val = node.secondary_ip
1138 a8083063 Iustin Pop
        elif field in self.dynamic_fields:
1139 a8083063 Iustin Pop
          val = live_data[node.name].get(field, "?")
1140 a8083063 Iustin Pop
        else:
1141 a8083063 Iustin Pop
          raise errors.ParameterError, field
1142 a8083063 Iustin Pop
        val = str(val)
1143 a8083063 Iustin Pop
        node_output.append(val)
1144 a8083063 Iustin Pop
      output.append(node_output)
1145 a8083063 Iustin Pop
1146 a8083063 Iustin Pop
    return output
1147 a8083063 Iustin Pop
1148 a8083063 Iustin Pop
1149 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
1150 dcb93971 Michael Hanselmann
  """Logical unit for getting volumes on node(s).
1151 dcb93971 Michael Hanselmann

1152 dcb93971 Michael Hanselmann
  """
1153 dcb93971 Michael Hanselmann
  _OP_REQP = ["nodes", "output_fields"]
1154 dcb93971 Michael Hanselmann
1155 dcb93971 Michael Hanselmann
  def CheckPrereq(self):
1156 dcb93971 Michael Hanselmann
    """Check prerequisites.
1157 dcb93971 Michael Hanselmann

1158 dcb93971 Michael Hanselmann
    This checks that the fields required are valid output fields.
1159 dcb93971 Michael Hanselmann

1160 dcb93971 Michael Hanselmann
    """
1161 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, self.op.nodes)
1162 dcb93971 Michael Hanselmann
1163 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["node"],
1164 dcb93971 Michael Hanselmann
                       dynamic=["phys", "vg", "name", "size", "instance"],
1165 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
1166 dcb93971 Michael Hanselmann
1167 dcb93971 Michael Hanselmann
1168 dcb93971 Michael Hanselmann
  def Exec(self, feedback_fn):
1169 dcb93971 Michael Hanselmann
    """Computes the list of nodes and their attributes.
1170 dcb93971 Michael Hanselmann

1171 dcb93971 Michael Hanselmann
    """
1172 dcb93971 Michael Hanselmann
    nodenames = utils.NiceSort([node.name for node in self.nodes])
1173 dcb93971 Michael Hanselmann
    volumes = rpc.call_node_volumes(nodenames)
1174 dcb93971 Michael Hanselmann
1175 dcb93971 Michael Hanselmann
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
1176 dcb93971 Michael Hanselmann
             in self.cfg.GetInstanceList()]
1177 dcb93971 Michael Hanselmann
1178 dcb93971 Michael Hanselmann
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])
1179 dcb93971 Michael Hanselmann
1180 dcb93971 Michael Hanselmann
    output = []
1181 dcb93971 Michael Hanselmann
    for node in nodenames:
1182 dcb93971 Michael Hanselmann
      node_vols = volumes[node][:]
1183 dcb93971 Michael Hanselmann
      node_vols.sort(key=lambda vol: vol['dev'])
1184 dcb93971 Michael Hanselmann
1185 dcb93971 Michael Hanselmann
      for vol in node_vols:
1186 dcb93971 Michael Hanselmann
        node_output = []
1187 dcb93971 Michael Hanselmann
        for field in self.op.output_fields:
1188 dcb93971 Michael Hanselmann
          if field == "node":
1189 dcb93971 Michael Hanselmann
            val = node
1190 dcb93971 Michael Hanselmann
          elif field == "phys":
1191 dcb93971 Michael Hanselmann
            val = vol['dev']
1192 dcb93971 Michael Hanselmann
          elif field == "vg":
1193 dcb93971 Michael Hanselmann
            val = vol['vg']
1194 dcb93971 Michael Hanselmann
          elif field == "name":
1195 dcb93971 Michael Hanselmann
            val = vol['name']
1196 dcb93971 Michael Hanselmann
          elif field == "size":
1197 dcb93971 Michael Hanselmann
            val = int(float(vol['size']))
1198 dcb93971 Michael Hanselmann
          elif field == "instance":
1199 dcb93971 Michael Hanselmann
            for inst in ilist:
1200 dcb93971 Michael Hanselmann
              if node not in lv_by_node[inst]:
1201 dcb93971 Michael Hanselmann
                continue
1202 dcb93971 Michael Hanselmann
              if vol['name'] in lv_by_node[inst][node]:
1203 dcb93971 Michael Hanselmann
                val = inst.name
1204 dcb93971 Michael Hanselmann
                break
1205 dcb93971 Michael Hanselmann
            else:
1206 dcb93971 Michael Hanselmann
              val = '-'
1207 dcb93971 Michael Hanselmann
          else:
1208 dcb93971 Michael Hanselmann
            raise errors.ParameterError, field
1209 dcb93971 Michael Hanselmann
          node_output.append(str(val))
1210 dcb93971 Michael Hanselmann
1211 dcb93971 Michael Hanselmann
        output.append(node_output)
1212 dcb93971 Michael Hanselmann
1213 dcb93971 Michael Hanselmann
    return output
1214 dcb93971 Michael Hanselmann
1215 dcb93971 Michael Hanselmann
1216 a8083063 Iustin Pop
def _CheckNodesDirs(node_list, paths):
  """Verify if the given nodes have the same files.

  Args:
    node_list: the list of node names to check
    paths: the list of directories to checksum and compare

  Returns:
    list of (node, different_file, message); if empty, the files are in sync

  """
  # gather every regular file living directly under the given directories
  file_names = []
  for directory in paths:
    entries = [os.path.join(directory, entry)
               for entry in os.listdir(directory)]
    file_names.extend([entry for entry in entries if os.path.isfile(entry)])

  # local (master-side) reference checksums
  local_checksums = utils.FingerprintFiles(file_names)

  problems = []
  all_node_results = rpc.call_node_verify(node_list,
                                          {'filelist': file_names})
  for node_name in node_list:
    node_result = all_node_results.get(node_name, False)
    # a missing or malformed answer means none of that node's files can
    # be trusted, so report a single blanket error for it
    if not node_result or 'filelist' not in node_result:
      problems.append((node_name, "'all files'", "node communication error"))
      continue
    remote_checksums = node_result['filelist']
    for fname in local_checksums:
      if fname not in remote_checksums:
        problems.append((node_name, fname, "missing file"))
      elif remote_checksums[fname] != local_checksums[fname]:
        problems.append((node_name, fname, "wrong checksum"))
  return problems
1250 a8083063 Iustin Pop
1251 a8083063 Iustin Pop
1252 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    # NODE_PIP/NODE_SIP rely on CheckPrereq having stored the resolved
    # addresses into self.op.primary_ip / self.op.secondary_ip
    env = {
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.LookupHostname(node_name)
    if not dns_data:
      raise errors.OpPrereqError, ("Node %s is not resolvable" % node_name)

    # from here on the DNS-resolved name and IP are authoritative
    node = dns_data['hostname']
    primary_ip = self.op.primary_ip = dns_data['ip']
    secondary_ip = getattr(self.op, "secondary_ip", None)
    # a missing secondary ip means a single-homed node
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError, ("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError, ("Node %s is already in the configuration"
                                   % node)

    # refuse addresses already used (as primary or secondary) by any
    # existing node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError, ("New node ip address(es) conflict with"
                                     " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError, ("The master has no private ip but the"
                                     " new node has one")
      else:
        # NOTE(review): missing comma here vs. the "raise X, (...)" form
        # used everywhere else in this file; this is a plain call
        # expression and still raises OpPrereqError, but is inconsistent
        raise errors.OpPrereqError ("The master has a private ip but the"
                                    " new node doesn't have one")

    # checks reachablity
    command = ["fping", "-q", primary_ip]
    result = utils.RunCmd(command)
    if result.failed:
      raise errors.OpPrereqError, ("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
      result = utils.RunCmd(command)
      if result.failed:
        raise errors.OpPrereqError, ("Node secondary ip not reachable by ping")

    # the node object is only created here; Exec() commits it to the
    # configuration as its final step
    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    This pushes the node daemon password and SSL certificate to the new
    node, restarts its node daemon, verifies connectivity and protocol
    version, copies the ssh host/root keys, updates /etc/hosts and
    known_hosts cluster-wide and finally adds the node to the
    configuration.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is embedded in a shell command below, so only a safe
    # character set is accepted
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError, ("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError, ("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError, ("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # remove first the root's known_hosts file
    utils.RemoveFile("/root/.ssh/known_hosts")
    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError, ("Remote command on node %s, error: %s,"
                                 " output: %s" %
                                 (node, result.fail_reason, result.output))

    # check connectivity
    # give the freshly restarted node daemon a moment to come up
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError, ("Version mismatch master version %s,"
                                   " node version %s" %
                                   (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError, ("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    keyarray = []
    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    # keyarray entries are in the same order as keyfiles above
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError, ("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      # ask the new node itself to confirm it owns the secondary ip
      result = ssh.SSHCall(node, "root",
                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
      if result.failed:
        raise errors.OpExecError, ("Node claims it doesn't have the"
                                   " secondary ip you gave (%s).\n"
                                   "Please fix and re-run this command." %
                                   new_node.secondary_ip)

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", "/etc/ssh/ssh_known_hosts"):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # distribution failures are logged but do not abort the add
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = [constants.MASTER_CRON_FILE]
    to_copy.extend(ss.GetFileList())
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1463 a8083063 Iustin Pop
1464 a8083063 Iustin Pop
1465 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    # the local hostname is taken as the failover target
    self.new_master = socket.gethostname()

    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError, ("This commands must be run on the node"
                                   " where you want the new master to be.\n"
                                   "%s is already the master" %
                                   self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    The ordering below matters: the old master role is stopped first,
    then the new master name is persisted and distributed, and only
    then is the master role started locally.

    """

    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    if not rpc.call_node_stop_master(self.old_master):
      # best-effort: failure here is reported but does not abort the
      # failover, the operator is asked to clean up manually
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # record the new master in the simple store and push that file to
    # every node so they all agree on who the master is
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")
1534 a8083063 Iustin Pop
1535 a8083063 Iustin Pop
1536 a8083063 Iustin Pop
1537 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    Builds a dictionary holding the cluster name, the various version
    numbers, the master node, the host architecture and the lists of
    instances (with their primary node) and nodes.

    """
    cfg = self.cfg
    instance_list = [cfg.GetInstanceInfo(iname)
                     for iname in cfg.GetInstanceList()]
    result = {}
    result["name"] = cfg.GetClusterName()
    result["software_version"] = constants.RELEASE_VERSION
    result["protocol_version"] = constants.PROTOCOL_VERSION
    result["config_version"] = constants.CONFIG_VERSION
    result["os_api_version"] = constants.OS_API_VERSION
    result["export_version"] = constants.EXPORT_VERSION
    result["master"] = self.sstore.GetMasterNode()
    result["architecture"] = (platform.architecture()[0], platform.machine())
    result["instances"] = [(inst.name, inst.primary_node)
                           for inst in instance_list]
    result["nodes"] = cfg.GetNodeList()

    return result
1570 a8083063 Iustin Pop
1571 a8083063 Iustin Pop
1572 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the source file exists locally and expands the
    requested node list.

    """
    source = self.op.filename
    if not os.path.exists(source):
      raise errors.OpPrereqError("No such filename '%s'" % source)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Distributes self.op.filename to every node in self.nodes except the
    local node itself; copy failures are logged, not raised.

    """
    source = self.op.filename
    local_name = socket.gethostname()

    for target in self.nodes:
      # no point in copying the file onto ourselves
      if target == local_name:
        continue
      if not ssh.CopyFileToNode(target, source):
        logger.Error("Copy of file %s to node %s failed" % (source, target))
1609 a8083063 Iustin Pop
1610 a8083063 Iustin Pop
1611 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites are needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    The actual serialization is delegated entirely to the configuration
    object.

    """
    return self.cfg.DumpConfig()
1628 a8083063 Iustin Pop
1629 a8083063 Iustin Pop
1630 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    Executes the command via ssh on each selected node and collects,
    per node, the command line, its output and its exit code.

    """
    command = self.op.command
    outcome = []
    for node in self.nodes:
      run_result = utils.RunCmd(["ssh", node.name, command])
      outcome.append((node.name, run_result.cmd, run_result.output,
                      run_result.exit_code))

    return outcome
1654 a8083063 Iustin Pop
1655 a8083063 Iustin Pop
1656 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1657 a8083063 Iustin Pop
  """Bring up an instance's disks.
1658 a8083063 Iustin Pop

1659 a8083063 Iustin Pop
  """
1660 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1661 a8083063 Iustin Pop
1662 a8083063 Iustin Pop
  def CheckPrereq(self):
1663 a8083063 Iustin Pop
    """Check prerequisites.
1664 a8083063 Iustin Pop

1665 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1666 a8083063 Iustin Pop

1667 a8083063 Iustin Pop
    """
1668 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1669 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1670 a8083063 Iustin Pop
    if instance is None:
1671 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
1672 a8083063 Iustin Pop
                                   self.op.instance_name)
1673 a8083063 Iustin Pop
    self.instance = instance
1674 a8083063 Iustin Pop
1675 a8083063 Iustin Pop
1676 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1677 a8083063 Iustin Pop
    """Activate the disks.
1678 a8083063 Iustin Pop

1679 a8083063 Iustin Pop
    """
1680 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1681 a8083063 Iustin Pop
    if not disks_ok:
1682 a8083063 Iustin Pop
      raise errors.OpExecError, ("Cannot activate block devices")
1683 a8083063 Iustin Pop
1684 a8083063 Iustin Pop
    return disks_info
1685 a8083063 Iustin Pop
1686 a8083063 Iustin Pop
1687 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the configuration object, used to set the physical disk IDs
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    false if the operation failed
    list of (host, instance_visible_name, node_visible_name) if the operation
         suceeded with the mapping from node devices to instance devices
  """
  disks_ok = True
  device_info = []

  for disk in instance.disks:
    primary_result = None
    for node_name, node_dev in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_dev, node_name)
      on_primary = node_name == instance.primary_node
      assemble_result = rpc.call_blockdev_assemble(node_name, node_dev,
                                                   on_primary)
      if not assemble_result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=%s)" % (disk.iv_name, node_name,
                                           on_primary))
        # secondary-node failures are tolerated when requested
        if on_primary or not ignore_secondaries:
          disks_ok = False
      if on_primary:
        primary_result = assemble_result
    # the visible device name is the one assembled on the primary node
    device_info.append((instance.primary_node, disk.iv_name, primary_result))

  return disks_ok, device_info
1721 a8083063 Iustin Pop
1722 a8083063 Iustin Pop
1723 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1724 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1725 a8083063 Iustin Pop

1726 a8083063 Iustin Pop
  """
1727 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1728 a8083063 Iustin Pop
1729 a8083063 Iustin Pop
  def CheckPrereq(self):
1730 a8083063 Iustin Pop
    """Check prerequisites.
1731 a8083063 Iustin Pop

1732 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1733 a8083063 Iustin Pop

1734 a8083063 Iustin Pop
    """
1735 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1736 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1737 a8083063 Iustin Pop
    if instance is None:
1738 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
1739 a8083063 Iustin Pop
                                   self.op.instance_name)
1740 a8083063 Iustin Pop
    self.instance = instance
1741 a8083063 Iustin Pop
1742 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1743 a8083063 Iustin Pop
    """Deactivate the disks
1744 a8083063 Iustin Pop

1745 a8083063 Iustin Pop
    """
1746 a8083063 Iustin Pop
    instance = self.instance
1747 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1748 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1749 a8083063 Iustin Pop
    if not type(ins_l) is list:
1750 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't contact node '%s'" %
1751 a8083063 Iustin Pop
                                 instance.primary_node)
1752 a8083063 Iustin Pop
1753 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1754 a8083063 Iustin Pop
      raise errors.OpExecError, ("Instance is running, can't shutdown"
1755 a8083063 Iustin Pop
                                 " block devices.")
1756 a8083063 Iustin Pop
1757 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1758 a8083063 Iustin Pop
1759 a8083063 Iustin Pop
1760 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  (they do not cause a False return value); errors on secondary nodes
  always do.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the configuration object, used to set the physical disk IDs
    ignore_primary: whether a shutdown failure on the primary node is
                    tolerated

  Returns:
    True if all shutdowns (that matter) succeeded, False otherwise

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # a failure only counts against the result if it happened on a
        # secondary node, or on the primary without ignore_primary
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1779 a8083063 Iustin Pop
1780 a8083063 Iustin Pop
1781 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  The instance must be known to the cluster configuration; its disks
  are assembled first, then the instance is started on its primary
  node.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      "FORCE": self.op.force,
      }
    # hooks run on the master plus every node of the instance
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that all the
    bridges used by its NICs exist on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)

    # check bridges existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
      raise errors.OpPrereqError, ("one or more target bridges %s does not"
                                   " exist on destination node '%s'" %
                                   (brlist, instance.primary_node))

    self.instance = instance
    # canonicalize the (possibly abbreviated) name given in the opcode
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    Raises OpExecError if the primary node cannot be contacted, does
    not have enough free memory, or fails to assemble the disks or
    start the instance.

    """
    instance = self.instance
    force = self.op.force
    # extra_args is an optional opcode attribute
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError, ("Could not contact node %s for infos" %
                                 (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError, ("Not enough memory to start instance"
                                 " %s on node %s"
                                 " needed %s MiB, available %s MiB" %
                                 (instance.name, node_current, memory,
                                  freememory))

    # with force, disk assembly errors on secondary nodes are ignored
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=force)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      if not force:
        logger.Error("If the message above refers to a secondary node,"
                     " you can retry the operation using '--force'.")
      raise errors.OpExecError, ("Disk consistency error")

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk assembly before reporting the failure
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError, ("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
1865 a8083063 Iustin Pop
1866 a8083063 Iustin Pop
1867 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  The instance is stopped on its primary node, marked as down in the
  configuration, and its block devices are then deactivated.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  # required opcode parameters
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      }
    # master plus all nodes of the instance
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    primary = instance.primary_node
    # a failed shutdown is only logged; the instance is still marked
    # down and its disks are deactivated
    if not rpc.call_instance_shutdown(primary, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
1914 a8083063 Iustin Pop
1915 a8083063 Iustin Pop
1916 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
1917 a8083063 Iustin Pop
  """Remove an instance.
1918 a8083063 Iustin Pop

1919 a8083063 Iustin Pop
  """
1920 a8083063 Iustin Pop
  HPATH = "instance-remove"
1921 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1922 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1923 a8083063 Iustin Pop
1924 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1925 a8083063 Iustin Pop
    """Build hooks env.
1926 a8083063 Iustin Pop

1927 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1928 a8083063 Iustin Pop

1929 a8083063 Iustin Pop
    """
1930 a8083063 Iustin Pop
    env = {
1931 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
1932 a8083063 Iustin Pop
      "INSTANCE_PRIMARY": self.instance.primary_node,
1933 a8083063 Iustin Pop
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
1934 a8083063 Iustin Pop
      }
1935 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1936 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1937 a8083063 Iustin Pop
    return env, nl, nl
1938 a8083063 Iustin Pop
1939 a8083063 Iustin Pop
  def CheckPrereq(self):
1940 a8083063 Iustin Pop
    """Check prerequisites.
1941 a8083063 Iustin Pop

1942 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1943 a8083063 Iustin Pop

1944 a8083063 Iustin Pop
    """
1945 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1946 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1947 a8083063 Iustin Pop
    if instance is None:
1948 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
1949 a8083063 Iustin Pop
                                   self.op.instance_name)
1950 a8083063 Iustin Pop
    self.instance = instance
1951 a8083063 Iustin Pop
1952 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1953 a8083063 Iustin Pop
    """Remove the instance.
1954 a8083063 Iustin Pop

1955 a8083063 Iustin Pop
    """
1956 a8083063 Iustin Pop
    instance = self.instance
1957 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
1958 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
1959 a8083063 Iustin Pop
1960 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
1961 a8083063 Iustin Pop
      raise errors.OpExecError, ("Could not shutdown instance %s on node %s" %
1962 a8083063 Iustin Pop
                                 (instance.name, instance.primary_node))
1963 a8083063 Iustin Pop
1964 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
1965 a8083063 Iustin Pop
1966 a8083063 Iustin Pop
    _RemoveDisks(instance, self.cfg)
1967 a8083063 Iustin Pop
1968 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
1969 a8083063 Iustin Pop
1970 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
1971 a8083063 Iustin Pop
1972 a8083063 Iustin Pop
1973 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  Static fields come from the configuration; the "oper_state" and
  "oper_ram" fields additionally need live data gathered over RPC
  from the primary nodes.

  """
  # required opcode parameters
  _OP_REQP = ["output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields that require a live query of the primary nodes
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per instance, each row being the list
    of (stringified) values for the requested output fields.

    """

    instance_names = utils.NiceSort(self.cfg.GetInstanceList())
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    # only query the nodes when a requested field actually needs live data
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # an explicit False marks the node as unreachable
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = ",".join(instance.secondary_nodes) or "-"
        elif field == "admin_state":
          if instance.status == "down":
            val = "no"
          else:
            val = "yes"
        elif field == "oper_state":
          # live state, as opposed to the configured (admin) state
          if instance.primary_node in bad_nodes:
            val = "(node down)"
          else:
            if live_data.get(instance.name):
              val = "running"
            else:
              val = "stopped"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = "(node down)"
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        else:
          # should be unreachable: _CheckOutputFields validated the list
          raise errors.ParameterError, field
        val = str(val)
        iout.append(val)
      output.append(iout)

    return output
2070 a8083063 Iustin Pop
2071 a8083063 Iustin Pop
2072 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2073 a8083063 Iustin Pop
  """Failover an instance.
2074 a8083063 Iustin Pop

2075 a8083063 Iustin Pop
  """
2076 a8083063 Iustin Pop
  HPATH = "instance-failover"
2077 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2078 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2079 a8083063 Iustin Pop
2080 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2081 a8083063 Iustin Pop
    """Build hooks env.
2082 a8083063 Iustin Pop

2083 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2084 a8083063 Iustin Pop

2085 a8083063 Iustin Pop
    """
2086 a8083063 Iustin Pop
    env = {
2087 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2088 a8083063 Iustin Pop
      "INSTANCE_PRIMARY": self.instance.primary_node,
2089 a8083063 Iustin Pop
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
2090 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2091 a8083063 Iustin Pop
      }
2092 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2093 a8083063 Iustin Pop
    return env, nl, nl
2094 a8083063 Iustin Pop
2095 a8083063 Iustin Pop
  def CheckPrereq(self):
2096 a8083063 Iustin Pop
    """Check prerequisites.
2097 a8083063 Iustin Pop

2098 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2099 a8083063 Iustin Pop

2100 a8083063 Iustin Pop
    """
2101 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2102 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2103 a8083063 Iustin Pop
    if instance is None:
2104 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
2105 a8083063 Iustin Pop
                                   self.op.instance_name)
2106 a8083063 Iustin Pop
2107 3a7c308e Guido Trotter
    # check memory requirements on the secondary node
2108 3a7c308e Guido Trotter
    target_node = instance.secondary_nodes[0]
2109 3a7c308e Guido Trotter
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2110 3a7c308e Guido Trotter
    info = nodeinfo.get(target_node, None)
2111 3a7c308e Guido Trotter
    if not info:
2112 3a7c308e Guido Trotter
      raise errors.OpPrereqError, ("Cannot get current information"
2113 3a7c308e Guido Trotter
                                   " from node '%s'" % nodeinfo)
2114 3a7c308e Guido Trotter
    if instance.memory > info['memory_free']:
2115 3a7c308e Guido Trotter
      raise errors.OpPrereqError, ("Not enough memory on target node %s."
2116 3a7c308e Guido Trotter
                                   " %d MB available, %d MB required" %
2117 3a7c308e Guido Trotter
                                   (target_node, info['memory_free'],
2118 3a7c308e Guido Trotter
                                    instance.memory))
2119 3a7c308e Guido Trotter
2120 a8083063 Iustin Pop
    # check bridge existance
2121 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2122 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
2123 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("one or more target bridges %s does not"
2124 a8083063 Iustin Pop
                                   " exist on destination node '%s'" %
2125 a8083063 Iustin Pop
                                   (brlist, instance.primary_node))
2126 a8083063 Iustin Pop
2127 a8083063 Iustin Pop
    self.instance = instance
2128 a8083063 Iustin Pop
2129 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2130 a8083063 Iustin Pop
    """Failover an instance.
2131 a8083063 Iustin Pop

2132 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2133 a8083063 Iustin Pop
    starting it on the secondary.
2134 a8083063 Iustin Pop

2135 a8083063 Iustin Pop
    """
2136 a8083063 Iustin Pop
    instance = self.instance
2137 a8083063 Iustin Pop
2138 a8083063 Iustin Pop
    source_node = instance.primary_node
2139 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2140 a8083063 Iustin Pop
2141 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2142 a8083063 Iustin Pop
    for dev in instance.disks:
2143 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2144 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2145 a8083063 Iustin Pop
        if not self.op.ignore_consistency:
2146 a8083063 Iustin Pop
          raise errors.OpExecError, ("Disk %s is degraded on target node,"
2147 a8083063 Iustin Pop
                                     " aborting failover." % dev.iv_name)
2148 a8083063 Iustin Pop
2149 a8083063 Iustin Pop
    feedback_fn("* checking target node resource availability")
2150 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2151 a8083063 Iustin Pop
2152 a8083063 Iustin Pop
    if not nodeinfo:
2153 a8083063 Iustin Pop
      raise errors.OpExecError, ("Could not contact target node %s." %
2154 a8083063 Iustin Pop
                                 target_node)
2155 a8083063 Iustin Pop
2156 a8083063 Iustin Pop
    free_memory = int(nodeinfo[target_node]['memory_free'])
2157 a8083063 Iustin Pop
    memory = instance.memory
2158 a8083063 Iustin Pop
    if memory > free_memory:
2159 a8083063 Iustin Pop
      raise errors.OpExecError, ("Not enough memory to create instance %s on"
2160 a8083063 Iustin Pop
                                 " node %s. needed %s MiB, available %s MiB" %
2161 a8083063 Iustin Pop
                                 (instance.name, target_node, memory,
2162 a8083063 Iustin Pop
                                  free_memory))
2163 a8083063 Iustin Pop
2164 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2165 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2166 a8083063 Iustin Pop
                (instance.name, source_node))
2167 a8083063 Iustin Pop
2168 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2169 a8083063 Iustin Pop
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2170 a8083063 Iustin Pop
                   " anyway. Please make sure node %s is down"  %
2171 a8083063 Iustin Pop
                   (instance.name, source_node, source_node))
2172 a8083063 Iustin Pop
2173 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2174 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2175 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't shut down the instance's disks.")
2176 a8083063 Iustin Pop
2177 a8083063 Iustin Pop
    instance.primary_node = target_node
2178 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2179 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2180 a8083063 Iustin Pop
2181 a8083063 Iustin Pop
    feedback_fn("* activating the instance's disks on target node")
2182 a8083063 Iustin Pop
    logger.Info("Starting instance %s on node %s" %
2183 a8083063 Iustin Pop
                (instance.name, target_node))
2184 a8083063 Iustin Pop
2185 a8083063 Iustin Pop
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2186 a8083063 Iustin Pop
                                             ignore_secondaries=True)
2187 a8083063 Iustin Pop
    if not disks_ok:
2188 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2189 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't activate the instance's disks")
2190 a8083063 Iustin Pop
2191 a8083063 Iustin Pop
    feedback_fn("* starting the instance on the target node")
2192 a8083063 Iustin Pop
    if not rpc.call_instance_start(target_node, instance, None):
2193 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2194 a8083063 Iustin Pop
      raise errors.OpExecError("Could not start instance %s on node %s." %
2195 d0b3526f Michael Hanselmann
                               (instance.name, target_node))
2196 a8083063 Iustin Pop
2197 a8083063 Iustin Pop
2198 a8083063 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, device):
  """Recursively create a block device tree on the primary node.

  Creation on the primary is unconditional: every device in the tree
  is created, children first (depth-first).

  Returns:
    True on success, False as soon as any creation fails

  """
  # children must exist before the device that sits on top of them
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, child):
      return False

  cfg.SetDiskID(device, node)
  created_id = rpc.call_blockdev_create(node, device, device.size, True)
  if not created_id:
    return False
  # record the node-assigned id unless one is already known
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2217 a8083063 Iustin Pop
2218 a8083063 Iustin Pop
2219 a8083063 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, device, force):
  """Recursively create a block device tree on a secondary node.

  Devices are only created when their type requires presence on the
  secondary, or when 'force' is set by such a device higher up in the
  tree; otherwise the recursion just descends with the same flag.

  Returns:
    True on success, False as soon as any creation fails

  """
  # once one device in the chain must exist on the secondary, all of
  # its children must too
  if device.CreateOnSecondary():
    force = True
  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, child, force):
      return False

  if not force:
    # nothing to create at this level
    return True
  cfg.SetDiskID(device, node)
  created_id = rpc.call_blockdev_create(node, device, device.size, False)
  if not created_id:
    return False
  # record the node-assigned id unless one is already known
  if device.physical_id is None:
    device.physical_id = created_id
  return True
2244 a8083063 Iustin Pop
2245 a8083063 Iustin Pop
2246 a8083063 Iustin Pop
def _GenerateMDDRBDBranch(cfg, vgname, primary, secondary, size, base):
  """Generate a drbd device complete with its children.

  Allocates a cluster port, derives the LV names from 'base' and the
  port, and returns a drbd Disk backed by a data LV of the requested
  size plus a fixed 128 MiB metadata LV.

  """
  port = cfg.AllocatePort()
  lv_base = "%s_%s" % (base, port)
  data_lv = objects.Disk(dev_type="lvm", size=size,
                         logical_id=(vgname, "%s.data" % lv_base))
  # drbd metadata volume has a fixed size of 128
  meta_lv = objects.Disk(dev_type="lvm", size=128,
                         logical_id=(vgname, "%s.meta" % lv_base))
  return objects.Disk(dev_type="drbd", size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_lv, meta_lv])
2260 a8083063 Iustin Pop
2261 a8083063 Iustin Pop
2262 a8083063 Iustin Pop
def _GenerateDiskTemplate(cfg, vgname, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of Disk objects (sda/sdb) appropriate for the
  template; raises ProgrammerError for an unknown template or a
  secondary-node count that does not match the template.

  """
  #TODO: compute space requirements

  if template_name == "diskless":
    # no block devices at all
    disks = []
  elif template_name == "plain":
    # one plain LV per disk, primary node only
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
                           logical_id=(vgname, "%s.os" % instance_name),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
                           logical_id=(vgname, "%s.swap" % instance_name),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    # md_raid1 over two local LV mirrors, primary node only
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")
    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
                              logical_id=(vgname, "%s.os_m1" % instance_name))
    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
                              logical_id=(vgname, "%s.os_m2" % instance_name))
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
                              logical_id=(vgname, "%s.swap_m1" %
                                          instance_name))
    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
                              logical_id=(vgname, "%s.swap_m2" %
                                          instance_name))
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == "remote_raid1":
    # md_raid1 over a drbd branch mirrored to exactly one secondary
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, vgname,
                                         primary_node, remote_node, disk_sz,
                                         "%s-sda" % instance_name)
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, vgname,
                                         primary_node, remote_node, swap_sz,
                                         "%s-sdb" % instance_name)
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2320 a8083063 Iustin Pop
2321 a8083063 Iustin Pop
2322 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create every block device an instance needs.

  Helper for AddInstance: walks the instance's disk list and
  instantiates each device, first on all secondary nodes and then on
  the primary node. Creation stops at the first failure.

  Args:
    cfg: the cluster configuration object
    instance: the instance object whose disks should be created

  Returns:
    True if every device was created, False as soon as one fails

  """
  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    # secondaries first, so the primary can be assembled on top of them
    #HARDCODE
    for snode in instance.secondary_nodes:
      if not _CreateBlockDevOnSecondary(cfg, snode, disk, False):
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, disk):
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False
  return True
2349 a8083063 Iustin Pop
2350 a8083063 Iustin Pop
2351 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all block devices of an instance.

  Counterpart of `_CreateDisks()`, used by `AddInstance()` (for
  rollback) and `RemoveInstance()`. Unlike creation, removal is
  best-effort: if one device cannot be removed, the remaining ones
  are still processed.

  Args:
    instance: the instance object whose disks should be removed
    cfg: the cluster configuration object

  Returns:
    True if all devices were removed, False if any removal failed

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for top_disk in instance.disks:
    # remove the device on every node of the (possibly nested) tree
    for node, disk in top_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (top_disk.iv_name, node))
        all_removed = False
  return all_removed
2378 a8083063 Iustin Pop
2379 a8083063 Iustin Pop
2380 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2381 a8083063 Iustin Pop
  """Create an instance.
2382 a8083063 Iustin Pop

2383 a8083063 Iustin Pop
  """
2384 a8083063 Iustin Pop
  HPATH = "instance-add"
2385 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2386 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
2387 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2388 a8083063 Iustin Pop
              "wait_for_sync"]
2389 a8083063 Iustin Pop
2390 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2391 a8083063 Iustin Pop
    """Build hooks env.
2392 a8083063 Iustin Pop

2393 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2394 a8083063 Iustin Pop

2395 a8083063 Iustin Pop
    """
2396 a8083063 Iustin Pop
    env = {
2397 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2398 a8083063 Iustin Pop
      "INSTANCE_PRIMARY": self.op.pnode,
2399 a8083063 Iustin Pop
      "INSTANCE_SECONDARIES": " ".join(self.secondaries),
2400 a8083063 Iustin Pop
      "DISK_TEMPLATE": self.op.disk_template,
2401 a8083063 Iustin Pop
      "MEM_SIZE": self.op.mem_size,
2402 a8083063 Iustin Pop
      "DISK_SIZE": self.op.disk_size,
2403 a8083063 Iustin Pop
      "SWAP_SIZE": self.op.swap_size,
2404 a8083063 Iustin Pop
      "VCPUS": self.op.vcpus,
2405 a8083063 Iustin Pop
      "BRIDGE": self.op.bridge,
2406 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2407 a8083063 Iustin Pop
      }
2408 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2409 a8083063 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
2410 a8083063 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
2411 a8083063 Iustin Pop
      env["SRC_IMAGE"] = self.src_image
2412 a8083063 Iustin Pop
    if self.inst_ip:
2413 a8083063 Iustin Pop
      env["INSTANCE_IP"] = self.inst_ip
2414 a8083063 Iustin Pop
2415 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2416 a8083063 Iustin Pop
          self.secondaries)
2417 a8083063 Iustin Pop
    return env, nl, nl
2418 a8083063 Iustin Pop
2419 a8083063 Iustin Pop
2420 a8083063 Iustin Pop
  def CheckPrereq(self):
2421 a8083063 Iustin Pop
    """Check prerequisites.
2422 a8083063 Iustin Pop

2423 a8083063 Iustin Pop
    """
2424 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2425 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2426 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Invalid instance creation mode '%s'" %
2427 a8083063 Iustin Pop
                                   self.op.mode)
2428 a8083063 Iustin Pop
2429 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2430 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2431 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2432 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2433 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Importing an instance requires source"
2434 a8083063 Iustin Pop
                                     " node and path options")
2435 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2436 a8083063 Iustin Pop
      if src_node_full is None:
2437 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Unknown source node '%s'" % src_node)
2438 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2439 a8083063 Iustin Pop
2440 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2441 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("The source path must be absolute")
2442 a8083063 Iustin Pop
2443 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2444 a8083063 Iustin Pop
2445 a8083063 Iustin Pop
      if not export_info:
2446 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("No export found in dir %s" % src_path)
2447 a8083063 Iustin Pop
2448 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2449 a8083063 Iustin Pop
        raise errors.ProgrammerError, ("Corrupted export config")
2450 a8083063 Iustin Pop
2451 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
2452 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
2453 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Wrong export version %s (wanted %d)" %
2454 a8083063 Iustin Pop
                                     (ei_version, constants.EXPORT_VERSION))
2455 a8083063 Iustin Pop
2456 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
2457 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Can't import instance with more than"
2458 a8083063 Iustin Pop
                                     " one data disk")
2459 a8083063 Iustin Pop
2460 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
2461 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
2462 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
2463 a8083063 Iustin Pop
                                                         'disk0_dump'))
2464 a8083063 Iustin Pop
      self.src_image = diskimage
2465 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
2466 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
2467 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("No guest OS specified")
2468 a8083063 Iustin Pop
2469 a8083063 Iustin Pop
    # check primary node
2470 a8083063 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
2471 a8083063 Iustin Pop
    if pnode is None:
2472 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Primary node '%s' is uknown" %
2473 a8083063 Iustin Pop
                                   self.op.pnode)
2474 a8083063 Iustin Pop
    self.op.pnode = pnode.name
2475 a8083063 Iustin Pop
    self.pnode = pnode
2476 a8083063 Iustin Pop
    self.secondaries = []
2477 a8083063 Iustin Pop
    # disk template and mirror node verification
2478 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
2479 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Invalid disk template name")
2480 a8083063 Iustin Pop
2481 a8083063 Iustin Pop
    if self.op.disk_template == constants.DT_REMOTE_RAID1:
2482 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
2483 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("The 'remote_raid1' disk template needs"
2484 a8083063 Iustin Pop
                                     " a mirror node")
2485 a8083063 Iustin Pop
2486 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
2487 a8083063 Iustin Pop
      if snode_name is None:
2488 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Unknown secondary node '%s'" %
2489 a8083063 Iustin Pop
                                     self.op.snode)
2490 a8083063 Iustin Pop
      elif snode_name == pnode.name:
2491 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("The secondary node cannot be"
2492 a8083063 Iustin Pop
                                     " the primary node.")
2493 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
2494 a8083063 Iustin Pop
2495 ed1ebc60 Guido Trotter
    # Check lv size requirements
2496 ed1ebc60 Guido Trotter
    nodenames = [pnode.name] + self.secondaries
2497 ed1ebc60 Guido Trotter
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
2498 ed1ebc60 Guido Trotter
2499 ed1ebc60 Guido Trotter
    # Required free disk space as a function of disk and swap space
2500 ed1ebc60 Guido Trotter
    req_size_dict = {
2501 ed1ebc60 Guido Trotter
      constants.DT_DISKLESS: 0,
2502 ed1ebc60 Guido Trotter
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
2503 ed1ebc60 Guido Trotter
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
2504 ed1ebc60 Guido Trotter
      # 256 MB are added for drbd metadata, 128MB for each drbd device
2505 ed1ebc60 Guido Trotter
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
2506 ed1ebc60 Guido Trotter
    }
2507 ed1ebc60 Guido Trotter
2508 ed1ebc60 Guido Trotter
    if self.op.disk_template not in req_size_dict:
2509 ed1ebc60 Guido Trotter
      raise errors.ProgrammerError, ("Disk template '%s' size requirement"
2510 ed1ebc60 Guido Trotter
                                     " is unknown" %  self.op.disk_template)
2511 ed1ebc60 Guido Trotter
2512 ed1ebc60 Guido Trotter
    req_size = req_size_dict[self.op.disk_template]
2513 ed1ebc60 Guido Trotter
2514 ed1ebc60 Guido Trotter
    for node in nodenames:
2515 ed1ebc60 Guido Trotter
      info = nodeinfo.get(node, None)
2516 ed1ebc60 Guido Trotter
      if not info:
2517 ed1ebc60 Guido Trotter
        raise errors.OpPrereqError, ("Cannot get current information"
2518 ed1ebc60 Guido Trotter
                                     " from node '%s'" % nodeinfo)
2519 ed1ebc60 Guido Trotter
      if req_size > info['vg_free']:
2520 ed1ebc60 Guido Trotter
        raise errors.OpPrereqError, ("Not enough disk space on target node %s."
2521 ed1ebc60 Guido Trotter
                                     " %d MB available, %d MB required" %
2522 ed1ebc60 Guido Trotter
                                     (node, info['vg_free'], req_size))
2523 ed1ebc60 Guido Trotter
2524 a8083063 Iustin Pop
    # os verification
2525 a8083063 Iustin Pop
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2526 a8083063 Iustin Pop
    if not isinstance(os_obj, objects.OS):
2527 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("OS '%s' not in supported os list for"
2528 a8083063 Iustin Pop
                                   " primary node"  % self.op.os_type)
2529 a8083063 Iustin Pop
2530 a8083063 Iustin Pop
    # instance verification
2531 a8083063 Iustin Pop
    hostname1 = utils.LookupHostname(self.op.instance_name)
2532 a8083063 Iustin Pop
    if not hostname1:
2533 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance name '%s' not found in dns" %
2534 a8083063 Iustin Pop
                                   self.op.instance_name)
2535 a8083063 Iustin Pop
2536 a8083063 Iustin Pop
    self.op.instance_name = instance_name = hostname1['hostname']
2537 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2538 a8083063 Iustin Pop
    if instance_name in instance_list:
2539 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' is already in the cluster" %
2540 a8083063 Iustin Pop
                                   instance_name)
2541 a8083063 Iustin Pop
2542 a8083063 Iustin Pop
    ip = getattr(self.op, "ip", None)
2543 a8083063 Iustin Pop
    if ip is None or ip.lower() == "none":
2544 a8083063 Iustin Pop
      inst_ip = None
2545 a8083063 Iustin Pop
    elif ip.lower() == "auto":
2546 a8083063 Iustin Pop
      inst_ip = hostname1['ip']
2547 a8083063 Iustin Pop
    else:
2548 a8083063 Iustin Pop
      if not utils.IsValidIP(ip):
2549 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("given IP address '%s' doesn't look"
2550 a8083063 Iustin Pop
                                     " like a valid IP" % ip)
2551 a8083063 Iustin Pop
      inst_ip = ip
2552 a8083063 Iustin Pop
    self.inst_ip = inst_ip
2553 a8083063 Iustin Pop
2554 a8083063 Iustin Pop
    command = ["fping", "-q", hostname1['ip']]
2555 a8083063 Iustin Pop
    result = utils.RunCmd(command)
2556 a8083063 Iustin Pop
    if not result.failed:
2557 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("IP %s of instance %s already in use" %
2558 a8083063 Iustin Pop
                                   (hostname1['ip'], instance_name))
2559 a8083063 Iustin Pop
2560 a8083063 Iustin Pop
    # bridge verification
2561 a8083063 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
2562 a8083063 Iustin Pop
    if bridge is None:
2563 a8083063 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
2564 a8083063 Iustin Pop
    else:
2565 a8083063 Iustin Pop
      self.op.bridge = bridge
2566 a8083063 Iustin Pop
2567 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
2568 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("target bridge '%s' does not exist on"
2569 a8083063 Iustin Pop
                                   " destination node '%s'" %
2570 a8083063 Iustin Pop
                                   (self.op.bridge, pnode.name))
2571 a8083063 Iustin Pop
2572 a8083063 Iustin Pop
    if self.op.start:
2573 a8083063 Iustin Pop
      self.instance_status = 'up'
2574 a8083063 Iustin Pop
    else:
2575 a8083063 Iustin Pop
      self.instance_status = 'down'
2576 a8083063 Iustin Pop
2577 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2578 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
2579 a8083063 Iustin Pop

2580 a8083063 Iustin Pop
    """
2581 a8083063 Iustin Pop
    instance = self.op.instance_name
2582 a8083063 Iustin Pop
    pnode_name = self.pnode.name
2583 a8083063 Iustin Pop
2584 a8083063 Iustin Pop
    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
2585 a8083063 Iustin Pop
    if self.inst_ip is not None:
2586 a8083063 Iustin Pop
      nic.ip = self.inst_ip
2587 a8083063 Iustin Pop
2588 a8083063 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg, self.cfg.GetVGName(),
2589 a8083063 Iustin Pop
                                  self.op.disk_template,
2590 a8083063 Iustin Pop
                                  instance, pnode_name,
2591 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
2592 a8083063 Iustin Pop
                                  self.op.swap_size)
2593 a8083063 Iustin Pop
2594 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
2595 a8083063 Iustin Pop
                            primary_node=pnode_name,
2596 a8083063 Iustin Pop
                            memory=self.op.mem_size,
2597 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
2598 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
2599 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
2600 a8083063 Iustin Pop
                            status=self.instance_status,
2601 a8083063 Iustin Pop
                            )
2602 a8083063 Iustin Pop
2603 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
2604 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
2605 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2606 a8083063 Iustin Pop
      raise errors.OpExecError, ("Device creation failed, reverting...")
2607 a8083063 Iustin Pop
2608 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
2609 a8083063 Iustin Pop
2610 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
2611 a8083063 Iustin Pop
2612 a8083063 Iustin Pop
    if self.op.wait_for_sync:
2613 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj)
2614 a8083063 Iustin Pop
    elif iobj.disk_template == "remote_raid1":
2615 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
2616 a8083063 Iustin Pop
      time.sleep(15)
2617 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
2618 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
2619 a8083063 Iustin Pop
    else:
2620 a8083063 Iustin Pop
      disk_abort = False
2621 a8083063 Iustin Pop
2622 a8083063 Iustin Pop
    if disk_abort:
2623 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2624 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
2625 a8083063 Iustin Pop
      raise errors.OpExecError, ("There are some degraded disks for"
2626 a8083063 Iustin Pop
                                      " this instance")
2627 a8083063 Iustin Pop
2628 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
2629 a8083063 Iustin Pop
                (instance, pnode_name))
2630 a8083063 Iustin Pop
2631 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
2632 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
2633 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
2634 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
2635 a8083063 Iustin Pop
          raise errors.OpExecError, ("could not add os for instance %s"
2636 a8083063 Iustin Pop
                                          " on node %s" %
2637 a8083063 Iustin Pop
                                          (instance, pnode_name))
2638 a8083063 Iustin Pop
2639 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
2640 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
2641 a8083063 Iustin Pop
        src_node = self.op.src_node
2642 a8083063 Iustin Pop
        src_image = self.src_image
2643 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
2644 a8083063 Iustin Pop
                                                src_node, src_image):
2645 a8083063 Iustin Pop
          raise errors.OpExecError, ("Could not import os for instance"
2646 a8083063 Iustin Pop
                                          " %s on node %s" %
2647 a8083063 Iustin Pop
                                          (instance, pnode_name))
2648 a8083063 Iustin Pop
      else:
2649 a8083063 Iustin Pop
        # also checked in the prereq part
2650 a8083063 Iustin Pop
        raise errors.ProgrammerError, ("Unknown OS initialization mode '%s'"
2651 a8083063 Iustin Pop
                                       % self.op.mode)
2652 a8083063 Iustin Pop
2653 a8083063 Iustin Pop
    if self.op.start:
2654 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
2655 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
2656 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
2657 a8083063 Iustin Pop
        raise errors.OpExecError, ("Could not start instance")
2658 a8083063 Iustin Pop
2659 a8083063 Iustin Pop
2660 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
2661 a8083063 Iustin Pop
  """Connect to an instance's console.
2662 a8083063 Iustin Pop

2663 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
2664 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
2665 a8083063 Iustin Pop
  console.
2666 a8083063 Iustin Pop

2667 a8083063 Iustin Pop
  """
2668 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2669 a8083063 Iustin Pop
2670 a8083063 Iustin Pop
  def CheckPrereq(self):
2671 a8083063 Iustin Pop
    """Check prerequisites.
2672 a8083063 Iustin Pop

2673 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2674 a8083063 Iustin Pop

2675 a8083063 Iustin Pop
    """
2676 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2677 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2678 a8083063 Iustin Pop
    if instance is None:
2679 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
2680 a8083063 Iustin Pop
                                   self.op.instance_name)
2681 a8083063 Iustin Pop
    self.instance = instance
2682 a8083063 Iustin Pop
2683 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2684 a8083063 Iustin Pop
    """Connect to the console of an instance
2685 a8083063 Iustin Pop

2686 a8083063 Iustin Pop
    """
2687 a8083063 Iustin Pop
    instance = self.instance
2688 a8083063 Iustin Pop
    node = instance.primary_node
2689 a8083063 Iustin Pop
2690 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
2691 a8083063 Iustin Pop
    if node_insts is False:
2692 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't connect to node %s." % node)
2693 a8083063 Iustin Pop
2694 a8083063 Iustin Pop
    if instance.name not in node_insts:
2695 a8083063 Iustin Pop
      raise errors.OpExecError, ("Instance %s is not running." % instance.name)
2696 a8083063 Iustin Pop
2697 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
2698 a8083063 Iustin Pop
2699 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
2700 a8083063 Iustin Pop
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
2701 a8083063 Iustin Pop
    return node, console_cmd
2702 a8083063 Iustin Pop
2703 a8083063 Iustin Pop
2704 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
2705 a8083063 Iustin Pop
  """Adda new mirror member to an instance's disk.
2706 a8083063 Iustin Pop

2707 a8083063 Iustin Pop
  """
2708 a8083063 Iustin Pop
  HPATH = "mirror-add"
2709 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2710 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
2711 a8083063 Iustin Pop
2712 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2713 a8083063 Iustin Pop
    """Build hooks env.
2714 a8083063 Iustin Pop

2715 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2716 a8083063 Iustin Pop

2717 a8083063 Iustin Pop
    """
2718 a8083063 Iustin Pop
    env = {
2719 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2720 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
2721 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
2722 a8083063 Iustin Pop
      }
2723 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
2724 a8083063 Iustin Pop
          self.op.remote_node,] + list(self.instance.secondary_nodes)
2725 a8083063 Iustin Pop
    return env, nl, nl
2726 a8083063 Iustin Pop
2727 a8083063 Iustin Pop
  def CheckPrereq(self):
2728 a8083063 Iustin Pop
    """Check prerequisites.
2729 a8083063 Iustin Pop

2730 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2731 a8083063 Iustin Pop

2732 a8083063 Iustin Pop
    """
2733 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2734 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2735 a8083063 Iustin Pop
    if instance is None:
2736 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
2737 a8083063 Iustin Pop
                                   self.op.instance_name)
2738 a8083063 Iustin Pop
    self.instance = instance
2739 a8083063 Iustin Pop
2740 a8083063 Iustin Pop
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
2741 a8083063 Iustin Pop
    if remote_node is None:
2742 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Node '%s' not known" % self.op.remote_node)
2743 a8083063 Iustin Pop
    self.remote_node = remote_node
2744 a8083063 Iustin Pop
2745 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
2746 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("The specified node is the primary node of"
2747 a8083063 Iustin Pop
                                   " the instance.")
2748 a8083063 Iustin Pop
2749 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2750 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance's disk layout is not"
2751 a8083063 Iustin Pop
                                   " remote_raid1.")
2752 a8083063 Iustin Pop
    for disk in instance.disks:
2753 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
2754 a8083063 Iustin Pop
        break
2755 a8083063 Iustin Pop
    else:
2756 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Can't find this device ('%s') in the"
2757 a8083063 Iustin Pop
                                   " instance." % self.op.disk_name)
2758 a8083063 Iustin Pop
    if len(disk.children) > 1:
2759 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("The device already has two slave"
2760 a8083063 Iustin Pop
                                   " devices.\n"
2761 a8083063 Iustin Pop
                                   "This would create a 3-disk raid1"
2762 a8083063 Iustin Pop
                                   " which we don't allow.")
2763 a8083063 Iustin Pop
    self.disk = disk
2764 a8083063 Iustin Pop
2765 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2766 a8083063 Iustin Pop
    """Add the mirror component
2767 a8083063 Iustin Pop

2768 a8083063 Iustin Pop
    """
2769 a8083063 Iustin Pop
    disk = self.disk
2770 a8083063 Iustin Pop
    instance = self.instance
2771 a8083063 Iustin Pop
2772 a8083063 Iustin Pop
    remote_node = self.remote_node
2773 72d6c464 Michael Hanselmann
    new_drbd = _GenerateMDDRBDBranch(self.cfg, self.cfg.GetVGName(),
2774 72d6c464 Michael Hanselmann
                                     instance.primary_node, remote_node,
2775 72d6c464 Michael Hanselmann
                                     disk.size, "%s-%s" %
2776 a8083063 Iustin Pop
                                     (instance.name, self.op.disk_name))
2777 a8083063 Iustin Pop
2778 a8083063 Iustin Pop
    logger.Info("adding new mirror component on secondary")
2779 a8083063 Iustin Pop
    #HARDCODE
2780 a8083063 Iustin Pop
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False):
2781 a8083063 Iustin Pop
      raise errors.OpExecError, ("Failed to create new component on secondary"
2782 a8083063 Iustin Pop
                                 " node %s" % remote_node)
2783 a8083063 Iustin Pop
2784 a8083063 Iustin Pop
    logger.Info("adding new mirror component on primary")
2785 a8083063 Iustin Pop
    #HARDCODE
2786 a8083063 Iustin Pop
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd):
2787 a8083063 Iustin Pop
      # remove secondary dev
2788 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2789 a8083063 Iustin Pop
      rpc.call_blockdev_remove(remote_node, new_drbd)
2790 a8083063 Iustin Pop
      raise errors.OpExecError, ("Failed to create volume on primary")
2791 a8083063 Iustin Pop
2792 a8083063 Iustin Pop
    # the device exists now
2793 a8083063 Iustin Pop
    # call the primary node to add the mirror to md
2794 a8083063 Iustin Pop
    logger.Info("adding new mirror component to md")
2795 a8083063 Iustin Pop
    if not rpc.call_blockdev_addchild(instance.primary_node,
2796 a8083063 Iustin Pop
                                           disk, new_drbd):
2797 a8083063 Iustin Pop
      logger.Error("Can't add mirror compoment to md!")
2798 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2799 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
2800 a8083063 Iustin Pop
        logger.Error("Can't rollback on secondary")
2801 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
2802 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
2803 a8083063 Iustin Pop
        logger.Error("Can't rollback on primary")
2804 a8083063 Iustin Pop
      raise errors.OpExecError, "Can't add mirror component to md array"
2805 a8083063 Iustin Pop
2806 a8083063 Iustin Pop
    disk.children.append(new_drbd)
2807 a8083063 Iustin Pop
2808 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2809 a8083063 Iustin Pop
2810 a8083063 Iustin Pop
    _WaitForSync(self.cfg, instance)
2811 a8083063 Iustin Pop
2812 a8083063 Iustin Pop
    return 0
2813 a8083063 Iustin Pop
2814 a8083063 Iustin Pop
2815 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  The component to drop is identified by the disk's iv_name and by the
  DRBD port (disk_id) of the child; the underlying block devices are
  then removed from both nodes on a best-effort basis.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,
      }
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses the
    remote_raid1 template, owns the named disk, and that the disk has a
    drbd child with the given port; sets self.instance, self.disk,
    self.child and self.old_secondary for BuildHooksEnv/Exec.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the md disk by its iv_name (for/else: no match found)
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # locate the drbd child by its port, stored in logical_id[2]
    for child in disk.children:
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # the node of the drbd pair which is not the primary is the old
    # secondary; logical_id[0:2] holds the two node names
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    if not rpc.call_blockdev_removechild(instance.primary_node,
                                         disk, child):
      raise errors.OpExecError("Can't remove child from mirror.")

    # best-effort removal of the child's devices on both nodes; a
    # failure is only logged so the config cleanup below still happens
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)
2899 a8083063 Iustin Pop
2900 a8083063 Iustin Pop
2901 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  For every disk a new DRBD mirror component is created on the target
  node, attached to the MD device and synced; only then is the old
  component detached and its devices removed.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses remote_raid1
    and has exactly one secondary; resolves the (optional) target node
    into self.op.remote_node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    # without an explicit target, rebuild the mirror on the current
    # secondary node
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is None:
      remote_node = instance.secondary_nodes[0]
    else:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    self.op.remote_node = remote_node

  def Exec(self, feedback_fn):
    """Replace the disks of an instance.

    """
    instance = self.instance
    # maps iv_name -> (md device, old child, new child) for the later
    # degradation check and old-component removal
    iv_names = {}
    # start of work
    remote_node = self.op.remote_node
    cfg = self.cfg
    vgname = cfg.GetVGName()
    for dev in instance.disks:
      size = dev.size
      new_drbd = _GenerateMDDRBDBranch(cfg, vgname, instance.primary_node,
                                       remote_node, size,
                                       "%s-%s" % (instance.name, dev.iv_name))
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False):
        raise errors.OpExecError("Failed to create new component on"
                                 " secondary node %s\n"
                                 "Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
                                        new_drbd):
        # roll back the new component on both nodes before aborting
        logger.Error("Can't add mirror compoment to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # all new components are healthy: detach and remove the old ones
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechild(instance.primary_node,
                                           dev, child):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3047 a8083063 Iustin Pop
3048 a8083063 Iustin Pop
3049 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        # note: the append must happen inside the loop, otherwise only
        # the last requested instance would be remembered
        self.wanted_instances.append(instance)
    else:
      # no names given: report on all instances in the cluster
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return


  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device and, recursively, its
    children; pstatus/sstatus are the raw results of
    rpc.call_blockdev_find on the primary/secondary node.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type == "drbd":
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""

    result = {}
    for instance in self.wanted_instances:
      # the hypervisor only reports on running instances
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        }

      result[instance.name] = idict

    return result
3148 a8083063 Iustin Pop
3149 a8083063 Iustin Pop
3150 a8083063 Iustin Pop
class LUQueryNodeData(NoHooksLU):
  """Logical unit for querying node data.

  """
  _OP_REQP = ["nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional node list against the existing names.

    """
    self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute and return the list of nodes.

    For every wanted node, returns a tuple of (name, primary_ip,
    secondary_ip, primary instance names, secondary instance names).

    """
    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    result = []
    for node in self.wanted_nodes:
      # instances running here and instances mirrored here
      pri_names = [inst.name for inst in instances
                   if inst.primary_node == node.name]
      sec_names = [inst.name for inst in instances
                   if node.name in inst.secondary_nodes]
      result.append((node.name, node.primary_ip, node.secondary_ip,
                     pri_names, sec_names))
    return result
3180 a8083063 Iustin Pop
3181 a8083063 Iustin Pop
3182 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
3183 a8083063 Iustin Pop
  """Modifies an instances's parameters.
3184 a8083063 Iustin Pop

3185 a8083063 Iustin Pop
  """
3186 a8083063 Iustin Pop
  HPATH = "instance-modify"
3187 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3188 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3189 a8083063 Iustin Pop
3190 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3191 a8083063 Iustin Pop
    """Build hooks env.
3192 a8083063 Iustin Pop

3193 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
3194 a8083063 Iustin Pop

3195 a8083063 Iustin Pop
    """
3196 a8083063 Iustin Pop
    env = {
3197 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
3198 a8083063 Iustin Pop
      }
3199 a8083063 Iustin Pop
    if self.mem:
3200 a8083063 Iustin Pop
      env["MEM_SIZE"] = self.mem
3201 a8083063 Iustin Pop
    if self.vcpus:
3202 a8083063 Iustin Pop
      env["VCPUS"] = self.vcpus
3203 a8083063 Iustin Pop
    if self.do_ip:
3204 a8083063 Iustin Pop
      env["INSTANCE_IP"] = self.ip
3205 a8083063 Iustin Pop
    if self.bridge:
3206 a8083063 Iustin Pop
      env["BRIDGE"] = self.bridge
3207 a8083063 Iustin Pop
3208 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3209 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3210 a8083063 Iustin Pop
3211 a8083063 Iustin Pop
    return env, nl, nl
3212 a8083063 Iustin Pop
3213 a8083063 Iustin Pop
  def CheckPrereq(self):
3214 a8083063 Iustin Pop
    """Check prerequisites.
3215 a8083063 Iustin Pop

3216 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
3217 a8083063 Iustin Pop

3218 a8083063 Iustin Pop
    """
3219 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
3220 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
3221 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
3222 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
3223 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3224 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("No changes submitted")
3225 a8083063 Iustin Pop
    if self.mem is not None:
3226 a8083063 Iustin Pop
      try:
3227 a8083063 Iustin Pop
        self.mem = int(self.mem)
3228 a8083063 Iustin Pop
      except ValueError, err:
3229 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Invalid memory size: %s" % str(err))
3230 a8083063 Iustin Pop
    if self.vcpus is not None:
3231 a8083063 Iustin Pop
      try:
3232 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
3233 a8083063 Iustin Pop
      except ValueError, err:
3234 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Invalid vcpus number: %s" % str(err))
3235 a8083063 Iustin Pop
    if self.ip is not None:
3236 a8083063 Iustin Pop
      self.do_ip = True
3237 a8083063 Iustin Pop
      if self.ip.lower() == "none":
3238 a8083063 Iustin Pop
        self.ip = None
3239 a8083063 Iustin Pop
      else:
3240 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
3241 a8083063 Iustin Pop
          raise errors.OpPrereqError, ("Invalid IP address '%s'." % self.ip)
3242 a8083063 Iustin Pop
    else:
3243 a8083063 Iustin Pop
      self.do_ip = False
3244 a8083063 Iustin Pop
3245 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3246 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3247 a8083063 Iustin Pop
    if instance is None:
3248 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("No such instance name '%s'" %
3249 a8083063 Iustin Pop
                                   self.op.instance_name)
3250 a8083063 Iustin Pop
    self.op.instance_name = instance.name
3251 a8083063 Iustin Pop
    self.instance = instance
3252 a8083063 Iustin Pop
    return
3253 a8083063 Iustin Pop
3254 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3255 a8083063 Iustin Pop
    """Modifies an instance.
3256 a8083063 Iustin Pop

3257 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
3258 a8083063 Iustin Pop
    """
3259 a8083063 Iustin Pop
    result = []
3260 a8083063 Iustin Pop
    instance = self.instance
3261 a8083063 Iustin Pop
    if self.mem:
3262 a8083063 Iustin Pop
      instance.memory = self.mem
3263 a8083063 Iustin Pop
      result.append(("mem", self.mem))
3264 a8083063 Iustin Pop
    if self.vcpus:
3265 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
3266 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
3267 a8083063 Iustin Pop
    if self.do_ip:
3268 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
3269 a8083063 Iustin Pop
      result.append(("ip", self.ip))
3270 a8083063 Iustin Pop
    if self.bridge:
3271 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
3272 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
3273 a8083063 Iustin Pop
3274 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3275 a8083063 Iustin Pop
3276 a8083063 Iustin Pop
    return result
3277 a8083063 Iustin Pop
3278 a8083063 Iustin Pop
3279 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    node_names = [node.name for node in self.nodes]
    return rpc.call_export_list(node_names)
3301 a8083063 Iustin Pop
3302 a8083063 Iustin Pop
3303 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode attributes that must be present; checked by the LogicalUnit
  # base class before CheckPrereq runs
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3310 a8083063 Iustin Pop
3311 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    The hooks for this LU run on the master, the instance's primary
    node and the export target node.

    """
    env = {}
    env["INSTANCE_NAME"] = self.op.instance_name
    env["EXPORT_NODE"] = self.op.target_node
    env["EXPORT_DO_SHUTDOWN"] = self.op.shutdown
    node_list = [self.sstore.GetMasterNode(),
                 self.instance.primary_node,
                 self.op.target_node]
    return env, node_list, node_list
3325 a8083063 Iustin Pop
3326 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance and the destination node exist; the
    names given in the opcode are canonicalized in the process
    (self.op.target_node is rewritten to the expanded node name).

    Raises:
      errors.OpPrereqError: if the instance or the destination node is
        not known to the cluster configuration

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      # FIX: typo in user-visible error message ("uknown" -> "unknown")
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    self.op.target_node = self.dst_node.name
3346 a8083063 Iustin Pop
3347 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Optionally shuts the instance down (per self.op.shutdown) while an
    LVM snapshot of its first disk is taken on the primary node, then
    copies the snapshot to the target node and finalizes the export
    there.  All per-step failures are logged and the export continues
    best-effort rather than aborting.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.processor.ChainOpCode(op, feedback_fn)

    vgname = self.cfg.GetVGName()

    # disk snapshots that were successfully created and must later be
    # exported and removed
    snap_disks = []

    try:
      for disk in instance.disks:
        # only the first/system disk ("sda") is snapshotted and exported
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # snapshot failed: log and continue (best-effort export)
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            # wrap the snapshot LV in a Disk object so the generic
            # export/remove RPCs can operate on it
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance (if we stopped it) even when snapshotting
      # failed, so the export never leaves the instance down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.processor.ChainOpCode(op, feedback_fn)

    # TODO: check for size

    # copy each snapshot to the destination node, then drop the
    # snapshot LV on the source node; failures are logged only
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    # write the export metadata on the destination node
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      # remove stale exports of this instance from all other nodes,
      # keeping only the one just created on dst_node
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))