#!/usr/bin/python
#

# Copyright (C) 2006, 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Module implementing the master-side code."""

# pylint: disable-msg=W0613,W0201

import os
import os.path
import sha
import socket
import time
import tempfile
import re
import platform

from ganeti import rpc
from ganeti import ssh
from ganeti import logger
from ganeti import utils
from ganeti import errors
from ganeti import hypervisor
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import opcodes
from ganeti import ssconf


class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overridden in derived classes in order to check op
    validity.

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError, ("Required parameter '%s' missing" %
                                     attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError, ("Cluster not initialized yet,"
                                     " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != socket.gethostname():
          raise errors.OpPrereqError, ("Commands must be run on the master"
                                       " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple consisting of: a
    dict containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in them,
    as it will be added by the hooks runner in case this LU requires a
    cluster to run on (otherwise we don't have a node list). An empty
    node list should be returned as an empty list (and not None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError


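# Example: a minimal LogicalUnit subclass following the rules above. This is
# an illustrative sketch only; LUExampleHooks and its "node-example" hooks
# path are hypothetical and not used anywhere else in this module.
class LUExampleHooks(LogicalUnit):
  """Example LU demonstrating the subclass contract.

  """
  HPATH = "node-example"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build the (env, pre-run nodes, post-run nodes) tuple.

    """
    env = {"NODE_NAME": self.op.node_name}
    return env, [], [self.op.node_name]

  def CheckPrereq(self):
    """Canonicalize the node name; raise OpPrereqError if it is unknown.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError, ("No such node name '%s'" %
                                   self.op.node_name)
    self.op.node_name = node.name

  def Exec(self, feedback_fn):
    """Do the actual work; this example only reports via the feedback.

    """
    feedback_fn("example LU ran for node %s" % self.op.node_name)

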
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  This LU is intended as a parent for other LogicalUnits which will
  run no hooks, in order to reduce duplicate code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    This is a no-op, since we don't run hooks.

    """
    return


def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded nodes.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if nodes is not None and not isinstance(nodes, list):
    raise errors.OpPrereqError, "Invalid argument type 'nodes'"

  if nodes:
    wanted_nodes = []

    for name in nodes:
      node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
      if node is None:
        raise errors.OpPrereqError, ("No such node name '%s'" % name)
      wanted_nodes.append(node)

    return wanted_nodes
  else:
    return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]


def _CheckOutputFields(static, dynamic, selected):
  """Checks whether all selected fields are valid.

  Args:
    static: Static fields
    dynamic: Dynamic fields

  """
  static_fields = frozenset(static)
  dynamic_fields = frozenset(dynamic)

  all_fields = static_fields | dynamic_fields

  if not all_fields.issuperset(selected):
    raise errors.OpPrereqError, ("Unknown output fields selected: %s"
                                 % ",".join(frozenset(selected).
                                            difference(all_fields)))


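# Typical use (a sketch with illustrative field names): a query LU calls this
# from its CheckPrereq, e.g.
#   _CheckOutputFields(static=["name"], dynamic=["mfree"],
#                      selected=self.op.output_fields)
# which raises OpPrereqError naming any selected field that is neither static
# nor dynamic.

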
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()


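# For illustration (example values only): the entry maintained above has the
# form "<ip>\t<fullnode> <node>", e.g. "192.0.2.10\tnode1.example.com node1".

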
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  if os.path.exists('/etc/ssh/ssh_known_hosts'):
    f = open('/etc/ssh/ssh_known_hosts', 'r+')
  else:
    f = open('/etc/ssh/ssh_known_hosts', 'w+')

  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()
    logger.Debug('read %s' % (repr(rawline),))

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    parts = line.split(' ')
    fields = parts[0].split(',')
    key = parts[2]

    haveall = True
    havesome = False
    for spec in [ ip, fullnode ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
    if haveall and key == pubkey:
      inthere = True
      save_lines.append(rawline)
      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
      continue

    if havesome and (not haveall or key != pubkey):
      removed = True
      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('tmp', 'ssh_known_hosts_', '/etc/ssh')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, '/etc/ssh/ssh_known_hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()


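# For illustration (example values only): the entry maintained above has the
# form "<fullnode>,<ip> ssh-rsa <pubkey>", e.g.
# "node1.example.com,192.0.2.10 ssh-rsa AAAAB3...".

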
def _HasValidVG(vglist, vgname):
  """Checks if the volume group list is valid.

  A non-None return value means there's an error, and the return value
  is the error message.

  """
  vgsize = vglist.get(vgname, None)
  if vgsize is None:
    return "volume group '%s' missing" % vgname
  elif vgsize < 20480:
    return ("volume group '%s' too small (20480MiB required, %dMiB found)" %
            (vgname, vgsize))
  return None


def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  utils.RemoveFile('/root/.ssh/known_hosts')

  if os.path.exists('/root/.ssh/id_dsa'):
    utils.CreateBackup('/root/.ssh/id_dsa')
  if os.path.exists('/root/.ssh/id_dsa.pub'):
    utils.CreateBackup('/root/.ssh/id_dsa.pub')

  utils.RemoveFile('/root/.ssh/id_dsa')
  utils.RemoveFile('/root/.ssh/id_dsa.pub')

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", "/root/.ssh/id_dsa",
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError, ("could not generate ssh keypair, error %s" %
                               result.output)

  f = open('/root/.ssh/id_dsa.pub', 'r')
  try:
    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError, ("could not generate server ssl cert, command"
                               " %s had exitcode %s and error message %s" %
                               (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError, ("could not start the node daemon, command %s"
                               " had exitcode %s and error %s" %
                               (result.cmd, result.exit_code, result.output))


class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"CLUSTER": self.op.cluster_name,
           "MASTER": self.hostname['hostname_full']}
    return env, [], [self.hostname['hostname_full']]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError, ("Cluster is already initialised")

    hostname_local = socket.gethostname()
    self.hostname = hostname = utils.LookupHostname(hostname_local)
    if not hostname:
      raise errors.OpPrereqError, ("Cannot resolve my own hostname ('%s')" %
                                   hostname_local)

    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
    if not clustername:
      raise errors.OpPrereqError, ("Cannot resolve given cluster name ('%s')"
                                   % self.op.cluster_name)

    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
    if result.failed:
      raise errors.OpPrereqError, ("Inconsistency: this host's name resolves"
                                   " to %s,\nbut this ip address does not"
                                   " belong to this host."
                                   " Aborting." % hostname['ip'])

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError, ("Invalid secondary ip given")
    if secondary_ip and secondary_ip != hostname['ip']:
      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
      if result.failed:
        raise errors.OpPrereqError, ("You gave %s as secondary IP,\n"
                                     "but it does not belong to this host." %
                                     secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError, ("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError, ("Invalid mac prefix given '%s'" %
                                   self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError, ("Invalid hypervisor type given '%s'" %
                                   self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError, ("Invalid master netdev given (%s): '%s'" %
                                   (self.op.master_netdev, result.output))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
    ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname'])

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname['hostname_full'])

    # set up ssh config and /etc/hosts
    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname['hostname_full'],
                    hostname['ip'],
                    )

    _UpdateKnownHosts(hostname['hostname_full'],
                      hostname['ip'],
                      sshkey,
                      )

    _InitSSHSetup(hostname['hostname'])

    # init of cluster config file
    cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)


class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) > 0 and nodelist != [master]:
      raise errors.OpPrereqError, ("There are still %d node(s) in "
                                   "this cluster." % (len(nodelist) - 1))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    utils.CreateBackup('/root/.ssh/id_dsa')
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())


class LUVerifyCluster(NoHooksLU):
607 a8083063 Iustin Pop
  """Verifies the cluster status.
608 a8083063 Iustin Pop

609 a8083063 Iustin Pop
  """
610 a8083063 Iustin Pop
  _OP_REQP = []
611 a8083063 Iustin Pop
612 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
613 a8083063 Iustin Pop
                  remote_version, feedback_fn):
614 a8083063 Iustin Pop
    """Run multiple tests against a node.
615 a8083063 Iustin Pop

616 a8083063 Iustin Pop
    Test list:
617 a8083063 Iustin Pop
      - compares ganeti version
618 a8083063 Iustin Pop
      - checks vg existance and size > 20G
619 a8083063 Iustin Pop
      - checks config file checksum
620 a8083063 Iustin Pop
      - checks ssh to other nodes
621 a8083063 Iustin Pop

622 a8083063 Iustin Pop
    Args:
623 a8083063 Iustin Pop
      node: name of the node to check
624 a8083063 Iustin Pop
      file_list: required list of files
625 a8083063 Iustin Pop
      local_cksum: dictionary of local files and their checksums
626 098c0958 Michael Hanselmann

627 a8083063 Iustin Pop
    """
628 a8083063 Iustin Pop
    # compares ganeti version
629 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
630 a8083063 Iustin Pop
    if not remote_version:
631 a8083063 Iustin Pop
      feedback_fn(" - ERROR: connection to %s failed" % (node))
632 a8083063 Iustin Pop
      return True
633 a8083063 Iustin Pop
634 a8083063 Iustin Pop
    if local_version != remote_version:
635 a8083063 Iustin Pop
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
636 a8083063 Iustin Pop
                      (local_version, node, remote_version))
637 a8083063 Iustin Pop
      return True
638 a8083063 Iustin Pop
639 a8083063 Iustin Pop
    # checks vg existance and size > 20G
640 a8083063 Iustin Pop
641 a8083063 Iustin Pop
    bad = False
642 a8083063 Iustin Pop
    if not vglist:
643 a8083063 Iustin Pop
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
644 a8083063 Iustin Pop
                      (node,))
645 a8083063 Iustin Pop
      bad = True
646 a8083063 Iustin Pop
    else:
647 a8083063 Iustin Pop
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
648 a8083063 Iustin Pop
      if vgstatus:
649 a8083063 Iustin Pop
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
650 a8083063 Iustin Pop
        bad = True
651 a8083063 Iustin Pop
652 a8083063 Iustin Pop
    # checks config file checksum
653 a8083063 Iustin Pop
    # checks ssh to any
654 a8083063 Iustin Pop
655 a8083063 Iustin Pop
    if 'filelist' not in node_result:
656 a8083063 Iustin Pop
      bad = True
657 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
658 a8083063 Iustin Pop
    else:
659 a8083063 Iustin Pop
      remote_cksum = node_result['filelist']
660 a8083063 Iustin Pop
      for file_name in file_list:
661 a8083063 Iustin Pop
        if file_name not in remote_cksum:
662 a8083063 Iustin Pop
          bad = True
663 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
664 a8083063 Iustin Pop
        elif remote_cksum[file_name] != local_cksum[file_name]:
665 a8083063 Iustin Pop
          bad = True
666 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
667 a8083063 Iustin Pop
668 a8083063 Iustin Pop
    if 'nodelist' not in node_result:
669 a8083063 Iustin Pop
      bad = True
670 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
671 a8083063 Iustin Pop
    else:
672 a8083063 Iustin Pop
      if node_result['nodelist']:
673 a8083063 Iustin Pop
        bad = True
674 a8083063 Iustin Pop
        for node in node_result['nodelist']:
675 a8083063 Iustin Pop
          feedback_fn("  - ERROR: communication with node '%s': %s" %
676 a8083063 Iustin Pop
                          (node, node_result['nodelist'][node]))
677 a8083063 Iustin Pop
    hyp_result = node_result.get('hypervisor', None)
678 a8083063 Iustin Pop
    if hyp_result is not None:
679 a8083063 Iustin Pop
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
680 a8083063 Iustin Pop
    return bad
681 a8083063 Iustin Pop
682 a8083063 Iustin Pop
  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
683 a8083063 Iustin Pop
    """Verify an instance.
684 a8083063 Iustin Pop

685 a8083063 Iustin Pop
    This function checks to see if the required block devices are
686 a8083063 Iustin Pop
    available on the instance's node.
687 a8083063 Iustin Pop

688 a8083063 Iustin Pop
    """
689 a8083063 Iustin Pop
    bad = False
690 a8083063 Iustin Pop
691 a8083063 Iustin Pop
    instancelist = self.cfg.GetInstanceList()
692 a8083063 Iustin Pop
    if not instance in instancelist:
693 a8083063 Iustin Pop
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
694 a8083063 Iustin Pop
                      (instance, instancelist))
695 a8083063 Iustin Pop
      bad = True
696 a8083063 Iustin Pop
697 a8083063 Iustin Pop
    instanceconfig = self.cfg.GetInstanceInfo(instance)
698 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
699 a8083063 Iustin Pop
700 a8083063 Iustin Pop
    node_vol_should = {}
701 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
702 a8083063 Iustin Pop
703 a8083063 Iustin Pop
    for node in node_vol_should:
704 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
705 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
706 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
707 a8083063 Iustin Pop
                          (volume, node))
708 a8083063 Iustin Pop
          bad = True
709 a8083063 Iustin Pop
710 a8083063 Iustin Pop
    if not instanceconfig.status == 'down':
711 a8083063 Iustin Pop
      if not instance in node_instance[node_current]:
712 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
713 a8083063 Iustin Pop
                        (instance, node_current))
714 a8083063 Iustin Pop
        bad = True
715 a8083063 Iustin Pop
716 a8083063 Iustin Pop
    for node in node_instance:
717 a8083063 Iustin Pop
      if (not node == node_current):
718 a8083063 Iustin Pop
        if instance in node_instance[node]:
719 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
720 a8083063 Iustin Pop
                          (instance, node))
721 a8083063 Iustin Pop
          bad = True
722 a8083063 Iustin Pop
723 a8083063 Iustin Pop
    return not bad
724 a8083063 Iustin Pop
725 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
726 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
727 a8083063 Iustin Pop

728 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
729 a8083063 Iustin Pop
    reported as unknown.
730 a8083063 Iustin Pop

731 a8083063 Iustin Pop
    """
732 a8083063 Iustin Pop
    bad = False
733 a8083063 Iustin Pop
734 a8083063 Iustin Pop
    for node in node_vol_is:
735 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
736 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
737 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
738 a8083063 Iustin Pop
                      (volume, node))
739 a8083063 Iustin Pop
          bad = True
740 a8083063 Iustin Pop
    return bad
741 a8083063 Iustin Pop
742 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
743 a8083063 Iustin Pop
    """Verify the list of running instances.
744 a8083063 Iustin Pop

745 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
746 a8083063 Iustin Pop

747 a8083063 Iustin Pop
    """
748 a8083063 Iustin Pop
    bad = False
749 a8083063 Iustin Pop
    for node in node_instance:
750 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
751 a8083063 Iustin Pop
        if runninginstance not in instancelist:
752 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
753 a8083063 Iustin Pop
                          (runninginstance, node))
754 a8083063 Iustin Pop
          bad = True
755 a8083063 Iustin Pop
    return bad
756 a8083063 Iustin Pop
757 a8083063 Iustin Pop
  def CheckPrereq(self):
758 a8083063 Iustin Pop
    """Check prerequisites.
759 a8083063 Iustin Pop

760 a8083063 Iustin Pop
    This has no prerequisites.
761 a8083063 Iustin Pop

762 a8083063 Iustin Pop
    """
763 a8083063 Iustin Pop
    pass
764 a8083063 Iustin Pop
765 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
766 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
767 a8083063 Iustin Pop

768 a8083063 Iustin Pop
    """
769 a8083063 Iustin Pop
    bad = False
770 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
771 a8083063 Iustin Pop
    self.cfg.VerifyConfig()
772 a8083063 Iustin Pop
773 880478f8 Iustin Pop
    master = self.sstore.GetMasterNode()
774 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
775 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
776 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
777 a8083063 Iustin Pop
    node_volume = {}
778 a8083063 Iustin Pop
    node_instance = {}
779 a8083063 Iustin Pop
780 a8083063 Iustin Pop
    # FIXME: verify OS list
781 a8083063 Iustin Pop
    # do local checksums
782 cb91d46e Iustin Pop
    file_names = list(self.sstore.GetFileList())
783 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
784 cb91d46e Iustin Pop
    file_names.append(constants.CLUSTER_CONF_FILE)
785 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
786 a8083063 Iustin Pop
787 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
788 a8083063 Iustin Pop
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
789 a8083063 Iustin Pop
    all_instanceinfo = rpc.call_instance_list(nodelist)
790 a8083063 Iustin Pop
    all_vglist = rpc.call_vg_list(nodelist)
791 a8083063 Iustin Pop
    node_verify_param = {
792 a8083063 Iustin Pop
      'filelist': file_names,
793 a8083063 Iustin Pop
      'nodelist': nodelist,
794 a8083063 Iustin Pop
      'hypervisor': None,
795 a8083063 Iustin Pop
      }
796 a8083063 Iustin Pop
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
797 a8083063 Iustin Pop
    all_rversion = rpc.call_version(nodelist)
798 a8083063 Iustin Pop
799 a8083063 Iustin Pop
    for node in nodelist:
800 a8083063 Iustin Pop
      feedback_fn("* Verifying node %s" % node)
801 a8083063 Iustin Pop
      result = self._VerifyNode(node, file_names, local_checksums,
802 a8083063 Iustin Pop
                                all_vglist[node], all_nvinfo[node],
803 a8083063 Iustin Pop
                                all_rversion[node], feedback_fn)
804 a8083063 Iustin Pop
      bad = bad or result
805 a8083063 Iustin Pop
806 a8083063 Iustin Pop
      # node_volume
807 a8083063 Iustin Pop
      volumeinfo = all_volumeinfo[node]
808 a8083063 Iustin Pop
809 a8083063 Iustin Pop
      if type(volumeinfo) != dict:
810 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
811 a8083063 Iustin Pop
        bad = True
812 a8083063 Iustin Pop
        continue
813 a8083063 Iustin Pop
814 a8083063 Iustin Pop
      node_volume[node] = volumeinfo
815 a8083063 Iustin Pop
816 a8083063 Iustin Pop
      # node_instance
817 a8083063 Iustin Pop
      nodeinstance = all_instanceinfo[node]
818 a8083063 Iustin Pop
      if type(nodeinstance) != list:
819 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
820 a8083063 Iustin Pop
        bad = True
821 a8083063 Iustin Pop
        continue
822 a8083063 Iustin Pop
823 a8083063 Iustin Pop
      node_instance[node] = nodeinstance
824 a8083063 Iustin Pop
825 a8083063 Iustin Pop
    node_vol_should = {}
826 a8083063 Iustin Pop
827 a8083063 Iustin Pop
    for instance in instancelist:
828 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
829 a8083063 Iustin Pop
      result =  self._VerifyInstance(instance, node_volume, node_instance,
830 a8083063 Iustin Pop
                                     feedback_fn)
831 a8083063 Iustin Pop
      bad = bad or result
832 a8083063 Iustin Pop
833 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
834 a8083063 Iustin Pop
835 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
836 a8083063 Iustin Pop
837 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
838 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
839 a8083063 Iustin Pop
                                       feedback_fn)
840 a8083063 Iustin Pop
    bad = bad or result
841 a8083063 Iustin Pop
842 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
843 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
844 a8083063 Iustin Pop
                                         feedback_fn)
845 a8083063 Iustin Pop
    bad = bad or result
846 a8083063 Iustin Pop
847 a8083063 Iustin Pop
    return int(bad)
848 a8083063 Iustin Pop
849 a8083063 Iustin Pop
850 a8083063 Iustin Pop
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
851 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
852 a8083063 Iustin Pop

853 a8083063 Iustin Pop
  """
854 a8083063 Iustin Pop
  if not instance.disks:
855 a8083063 Iustin Pop
    return True
856 a8083063 Iustin Pop
857 a8083063 Iustin Pop
  if not oneshot:
858 a8083063 Iustin Pop
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)
859 a8083063 Iustin Pop
860 a8083063 Iustin Pop
  node = instance.primary_node
861 a8083063 Iustin Pop
862 a8083063 Iustin Pop
  for dev in instance.disks:
863 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
864 a8083063 Iustin Pop
865 a8083063 Iustin Pop
  retries = 0
866 a8083063 Iustin Pop
  while True:
867 a8083063 Iustin Pop
    max_time = 0
868 a8083063 Iustin Pop
    done = True
869 a8083063 Iustin Pop
    cumul_degraded = False
870 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
871 a8083063 Iustin Pop
    if not rstats:
872 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
873 a8083063 Iustin Pop
      retries += 1
874 a8083063 Iustin Pop
      if retries >= 10:
875 a8083063 Iustin Pop
        raise errors.RemoteError, ("Can't contact node %s for mirror data,"
876 a8083063 Iustin Pop
                                   " aborting." % node)
877 a8083063 Iustin Pop
      time.sleep(6)
878 a8083063 Iustin Pop
      continue
879 a8083063 Iustin Pop
    retries = 0
880 a8083063 Iustin Pop
    for i in range(len(rstats)):
881 a8083063 Iustin Pop
      mstat = rstats[i]
882 a8083063 Iustin Pop
      if mstat is None:
883 a8083063 Iustin Pop
        logger.ToStderr("Can't compute data for node %s/%s" %
884 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
885 a8083063 Iustin Pop
        continue
886 a8083063 Iustin Pop
      perc_done, est_time, is_degraded = mstat
887 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
888 a8083063 Iustin Pop
      if perc_done is not None:
889 a8083063 Iustin Pop
        done = False
890 a8083063 Iustin Pop
        if est_time is not None:
891 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
892 a8083063 Iustin Pop
          max_time = est_time
893 a8083063 Iustin Pop
        else:
894 a8083063 Iustin Pop
          rem_time = "no time estimate"
895 a8083063 Iustin Pop
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
896 a8083063 Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
897 a8083063 Iustin Pop
    if done or oneshot:
898 a8083063 Iustin Pop
      break
899 a8083063 Iustin Pop
900 a8083063 Iustin Pop
    if unlock:
901 a8083063 Iustin Pop
      utils.Unlock('cmd')
902 a8083063 Iustin Pop
    try:
903 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
904 a8083063 Iustin Pop
    finally:
905 a8083063 Iustin Pop
      if unlock:
906 a8083063 Iustin Pop
        utils.Lock('cmd')
907 a8083063 Iustin Pop
908 a8083063 Iustin Pop
  if done:
909 a8083063 Iustin Pop
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
910 a8083063 Iustin Pop
  return not cumul_degraded
911 a8083063 Iustin Pop
912 a8083063 Iustin Pop
913 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
914 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
915 a8083063 Iustin Pop

916 a8083063 Iustin Pop
  """
917 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
918 a8083063 Iustin Pop
919 a8083063 Iustin Pop
  result = True
920 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
921 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
922 a8083063 Iustin Pop
    if not rstats:
923 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
924 a8083063 Iustin Pop
      result = False
925 a8083063 Iustin Pop
    else:
926 a8083063 Iustin Pop
      result = result and (not rstats[5])
927 a8083063 Iustin Pop
  if dev.children:
928 a8083063 Iustin Pop
    for child in dev.children:
929 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
930 a8083063 Iustin Pop
931 a8083063 Iustin Pop
  return result
932 a8083063 Iustin Pop
933 a8083063 Iustin Pop
934 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
935 a8083063 Iustin Pop
  """Logical unit for OS diagnose/query.
936 a8083063 Iustin Pop

937 a8083063 Iustin Pop
  """
938 a8083063 Iustin Pop
  _OP_REQP = []
939 a8083063 Iustin Pop
940 a8083063 Iustin Pop
  def CheckPrereq(self):
941 a8083063 Iustin Pop
    """Check prerequisites.
942 a8083063 Iustin Pop

943 a8083063 Iustin Pop
    This always succeeds, since this is a pure query LU.
944 a8083063 Iustin Pop

945 a8083063 Iustin Pop
    """
946 a8083063 Iustin Pop
    return
947 a8083063 Iustin Pop
948 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
949 a8083063 Iustin Pop
    """Compute the list of OSes.
950 a8083063 Iustin Pop

951 a8083063 Iustin Pop
    """
952 a8083063 Iustin Pop
    node_list = self.cfg.GetNodeList()
953 a8083063 Iustin Pop
    node_data = rpc.call_os_diagnose(node_list)
954 a8083063 Iustin Pop
    if node_data == False:
955 a8083063 Iustin Pop
      raise errors.OpExecError, "Can't gather the list of OSes"
956 a8083063 Iustin Pop
    return node_data
957 a8083063 Iustin Pop
958 a8083063 Iustin Pop
959 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
960 a8083063 Iustin Pop
  """Logical unit for removing a node.
961 a8083063 Iustin Pop

962 a8083063 Iustin Pop
  """
963 a8083063 Iustin Pop
  HPATH = "node-remove"
964 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
965 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
966 a8083063 Iustin Pop
967 a8083063 Iustin Pop
  def BuildHooksEnv(self):
968 a8083063 Iustin Pop
    """Build hooks env.
969 a8083063 Iustin Pop

970 a8083063 Iustin Pop
    This doesn't run on the target node in the pre phase as a failed
971 a8083063 Iustin Pop
    node would not allows itself to run.
972 a8083063 Iustin Pop

973 a8083063 Iustin Pop
    """
974 a8083063 Iustin Pop
    all_nodes = self.cfg.GetNodeList()
975 a8083063 Iustin Pop
    all_nodes.remove(self.op.node_name)
976 a8083063 Iustin Pop
    return {"NODE_NAME": self.op.node_name}, all_nodes, all_nodes
977 a8083063 Iustin Pop
978 a8083063 Iustin Pop
  def CheckPrereq(self):
979 a8083063 Iustin Pop
    """Check prerequisites.
980 a8083063 Iustin Pop

981 a8083063 Iustin Pop
    This checks:
982 a8083063 Iustin Pop
     - the node exists in the configuration
983 a8083063 Iustin Pop
     - it does not have primary or secondary instances
984 a8083063 Iustin Pop
     - it's not the master
985 a8083063 Iustin Pop

986 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
987 a8083063 Iustin Pop

988 a8083063 Iustin Pop
    """
989 a8083063 Iustin Pop
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
990 a8083063 Iustin Pop
    if node is None:
991 a8083063 Iustin Pop
      logger.Error("Error: Node '%s' is unknown." % self.op.node_name)
992 a8083063 Iustin Pop
      return 1
993 a8083063 Iustin Pop
994 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
995 a8083063 Iustin Pop
996 880478f8 Iustin Pop
    masternode = self.sstore.GetMasterNode()
997 a8083063 Iustin Pop
    if node.name == masternode:
998 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Node is the master node,"
999 a8083063 Iustin Pop
                                   " you need to failover first.")
1000 a8083063 Iustin Pop
1001 a8083063 Iustin Pop
    for instance_name in instance_list:
1002 a8083063 Iustin Pop
      instance = self.cfg.GetInstanceInfo(instance_name)
1003 a8083063 Iustin Pop
      if node.name == instance.primary_node:
1004 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Instance %s still running on the node,"
1005 a8083063 Iustin Pop
                                     " please remove first." % instance_name)
1006 a8083063 Iustin Pop
      if node.name in instance.secondary_nodes:
1007 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Instance %s has node as a secondary,"
1008 a8083063 Iustin Pop
                                     " please remove first." % instance_name)
1009 a8083063 Iustin Pop
    self.op.node_name = node.name
1010 a8083063 Iustin Pop
    self.node = node
1011 a8083063 Iustin Pop
1012 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1013 a8083063 Iustin Pop
    """Removes the node from the cluster.
1014 a8083063 Iustin Pop

1015 a8083063 Iustin Pop
    """
1016 a8083063 Iustin Pop
    node = self.node
1017 a8083063 Iustin Pop
    logger.Info("stopping the node daemon and removing configs from node %s" %
1018 a8083063 Iustin Pop
                node.name)
1019 a8083063 Iustin Pop
1020 a8083063 Iustin Pop
    rpc.call_node_leave_cluster(node.name)
1021 a8083063 Iustin Pop
1022 a8083063 Iustin Pop
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)
1023 a8083063 Iustin Pop
1024 a8083063 Iustin Pop
    logger.Info("Removing node %s from config" % node.name)
1025 a8083063 Iustin Pop
1026 a8083063 Iustin Pop
    self.cfg.RemoveNode(node.name)
1027 a8083063 Iustin Pop
1028 a8083063 Iustin Pop
1029 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree"])

    _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = utils.NiceSort(self.cfg.GetNodeList())
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            }
        else:
          live_data[name] = {}
    else:
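      # Note: dict.fromkeys gives every name a reference to the *same*
      # empty dict; that is harmless here because this fallback live_data
      # is only ever read, never mutated, in the output loop below.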
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict.fromkeys(nodenames, 0)
    node_to_secondary = dict.fromkeys(nodenames, 0)

    if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields:
      instancelist = self.cfg.GetInstanceList()

      for instance in instancelist:
        instanceinfo = self.cfg.GetInstanceInfo(instance)
        node_to_primary[instanceinfo.primary_node] += 1
        for secnode in instanceinfo.secondary_nodes:
          node_to_secondary[secnode] += 1

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst":
          val = node_to_primary[node.name]
        elif field == "sinst":
          val = node_to_secondary[node.name]
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, "?")
        else:
          raise errors.ParameterError, field
        val = str(val)
        node_output.append(val)
      output.append(node_output)

    return output

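# Illustrative sketch, not part of the original module: the if/elif field
# dispatch in LUQueryNodes.Exec above could equivalently be table-driven.
# The helper below is hypothetical and uses only attributes shown above.
def _ExampleNodeFieldValue(field, node, node_to_primary, node_to_secondary):
  """Sketch: resolve one static output field for a node object."""
  static_getters = {
    "name": lambda: node.name,
    "pinst": lambda: node_to_primary[node.name],
    "sinst": lambda: node_to_secondary[node.name],
    "pip": lambda: node.primary_ip,
    "sip": lambda: node.secondary_ip,
    }
  if field not in static_getters:
    raise errors.ParameterError, field
  return str(static_getters[field]())
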
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of volumes on the given node(s) and their attributes.

    """
    nodenames = utils.NiceSort([node.name for node in self.nodes])
    volumes = rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
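            # search for the instance owning this LV on this node; the
            # for/else below falls through to "-" when no instance
            # claims the volume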
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError, field
          node_output.append(str(val))

        output.append(node_output)

    return output

class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.LookupHostname(node_name)
    if not dns_data:
      raise errors.OpPrereqError, ("Node %s is not resolvable" % node_name)

    node = dns_data['hostname']
    primary_ip = self.op.primary_ip = dns_data['ip']
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError, ("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError, ("Node %s is already in the configuration"
                                   % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError, ("New node ip address(es) conflict with"
                                     " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError, ("The master has no private ip but the"
                                     " new node has one")
      else:
        raise errors.OpPrereqError, ("The master has a private ip but the"
                                     " new node doesn't have one")

    # check reachability
    command = ["fping", "-q", primary_ip]
    result = utils.RunCmd(command)
    if result.failed:
      raise errors.OpPrereqError, ("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
      result = utils.RunCmd(command)
      if result.failed:
        raise errors.OpPrereqError, ("Node secondary ip not reachable by ping")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
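    # The add proceeds roughly in six steps, mirrored in the code below:
    # push the node daemon password and SSL certificate over ssh and
    # restart the daemon, verify the protocol version over RPC, install
    # the ssh host and root keys, update and ping-check /etc/hosts and
    # known_hosts, distribute those plus the ssconf files cluster-wide,
    # and finally register the node in the configuration.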
    new_node = self.new_node
    node = new_node.name

    # set up the inter-node password and certificate and restart the
    # node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError, ("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError, ("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError, ("PEM must end with newline")
    logger.Info("copying the cluster password to %s and starting the node"
                " daemon" % node)

    # remove first the root's known_hosts file
    utils.RemoveFile("/root/.ssh/known_hosts")
    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

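    # For illustration, the interpolated remote command has this shape
    # (values elided):
    #
    #   umask 077 &&
    #   echo '<gntpass>' > '<noded password file>' &&
    #   cat > '<SSL cert file>' << '!EOF.' &&
    #   <PEM data ...>
    #   !EOF.
    #   <node init.d script> restart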
    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError, ("Remote command on node %s, error: %s,"
                                 " output: %s" %
                                 (node, result.fail_reason, result.output))

    # check connectivity
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s matches" %
                    (node, result))
      else:
        raise errors.OpExecError, ("Version mismatch: master version %s,"
                                   " node version %s" %
                                   (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError, ("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    keyarray = []
    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError, ("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      result = ssh.SSHCall(node, "root",
                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
      if result.failed:
        raise errors.OpExecError, ("Node claims it doesn't have the"
                                   " secondary ip you gave (%s).\n"
                                   "Please fix and re-run this command." %
                                   new_node.secondary_ip)

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", "/etc/ssh/ssh_known_hosts"):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)

class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = socket.gethostname()

    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError, ("This command must be run on the node"
                                   " where you want the new master to be.\n"
                                   "%s is already the master" %
                                   self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become the new
    master.

    """
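    # The failover runs in three steps: demote the old master, repoint
    # the master-node key in the simple store and redistribute it, then
    # promote the new master.  Failures are logged rather than raised,
    # since a partially completed failover can still be finished by hand.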
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could not disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")

class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    result = {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      }

    return result

class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    This copies self.op.filename to all nodes in self.nodes, skipping
    the master itself; if self.op.nodes is empty, all cluster nodes are
    used.

    """
    filename = self.op.filename

    myname = socket.gethostname()

    for node in self.nodes:
      if node.name == myname:
        continue
      if not ssh.CopyFileToNode(node.name, filename):
        logger.Error("Copy of file %s to node %s failed" %
                     (filename, node.name))

class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    return self.cfg.DumpConfig()

class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    Returns a list of (node_name, command, output, exit_code) tuples,
    one per node.

    """
    data = []
    for node in self.nodes:
      result = utils.RunCmd(["ssh", node.name, self.op.command])
      data.append((node.name, result.cmd, result.output, result.exit_code))

    return data

class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError, ("Cannot activate block devices")

    return disks_info

def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a (disks_ok, device_info) tuple; disks_ok is false if the operation
    failed, and device_info is the list of
    (host, instance_visible_name, node_visible_name) tuples mapping
    node devices to instance devices

  """
  device_info = []
  disks_ok = True
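  # Each disk is assembled on every node it spans (primary and
  # secondaries); only the assembly result on the primary node is
  # reported back to the caller through device_info.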
  for inst_disk in instance.disks:
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=%s)" %
                     (inst_disk.iv_name, node, is_primary))
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  return disks_ok, device_info

def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance, aborting on consistency errors.

  If force is true, errors on secondary nodes are ignored during
  assembly; on any failure the already-assembled disks are shut down
  again and OpExecError is raised.

  """
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(instance, cfg)
    if force is not None and not force:
      logger.Error("If the message above refers to a secondary node,"
                   " you can retry the operation using '--force'.")
    raise errors.OpExecError, ("Disk consistency error")

class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    if type(ins_l) is not list:
      raise errors.OpExecError, ("Can't contact node '%s'" %
                                 instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError, ("Instance is running, can't shutdown"
                                 " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)

def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored and
  the result is still considered a success.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result

class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      "FORCE": self.op.force,
      }
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)

    # check bridges existence
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
      raise errors.OpPrereqError, ("one or more target bridges %s do not"
                                   " exist on destination node '%s'" %
                                   (brlist, instance.primary_node))

    self.instance = instance
    self.op.instance_name = instance.name
  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError, ("Could not contact node %s for info" %
                                 (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError, ("Not enough memory to start instance"
                                 " %s on node %s:"
                                 " needed %s MiB, available %s MiB" %
                                 (instance.name, node_current, memory,
                                  freememory))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError, ("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)

class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      }
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    node_current = instance.primary_node
    if not rpc.call_instance_shutdown(node_current, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)

class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "INSTANCE_PRIMARY": self.instance.primary_node,
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
      }
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError, ("Instance '%s' has no disks" %
                                   self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError, ("Instance '%s' is marked to be up" %
                                   self.op.instance_name)
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError, ("Instance '%s' is running on the node %s" %
                                   (self.op.instance_name,
                                    instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        raise errors.OpPrereqError, ("Primary node '%s' is unknown" %
                                     instance.primary_node)
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
      if not isinstance(os_obj, objects.OS):
        raise errors.OpPrereqError, ("OS '%s' not in supported OS list for"
                                     " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
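    # Reinstalling means re-running the OS create scripts on the existing
    # disks: optionally switch the recorded OS type first, bring the disks
    # up, run the scripts, and always bring the disks back down, even on
    # failure (hence the try/finally below).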
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError, ("Could not install OS for instance %s"
                                   " on node %s" %
                                   (inst.name, inst.primary_node))
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)

1928 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
1929 a8083063 Iustin Pop
  """Remove an instance.
1930 a8083063 Iustin Pop

1931 a8083063 Iustin Pop
  """
1932 a8083063 Iustin Pop
  HPATH = "instance-remove"
1933 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1934 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1935 a8083063 Iustin Pop
1936 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1937 a8083063 Iustin Pop
    """Build hooks env.
1938 a8083063 Iustin Pop

1939 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1940 a8083063 Iustin Pop

1941 a8083063 Iustin Pop
    """
1942 a8083063 Iustin Pop
    env = {
1943 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
1944 a8083063 Iustin Pop
      "INSTANCE_PRIMARY": self.instance.primary_node,
1945 a8083063 Iustin Pop
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
1946 a8083063 Iustin Pop
      }
1947 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1948 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1949 a8083063 Iustin Pop
    return env, nl, nl
1950 a8083063 Iustin Pop
1951 a8083063 Iustin Pop
  def CheckPrereq(self):
1952 a8083063 Iustin Pop
    """Check prerequisites.
1953 a8083063 Iustin Pop

1954 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1955 a8083063 Iustin Pop

1956 a8083063 Iustin Pop
    """
1957 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1958 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1959 a8083063 Iustin Pop
    if instance is None:
1960 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
1961 a8083063 Iustin Pop
                                   self.op.instance_name)
1962 a8083063 Iustin Pop
    self.instance = instance
1963 a8083063 Iustin Pop
1964 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1965 a8083063 Iustin Pop
    """Remove the instance.
1966 a8083063 Iustin Pop

1967 a8083063 Iustin Pop
    """
1968 a8083063 Iustin Pop
    instance = self.instance
1969 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
1970 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
1971 a8083063 Iustin Pop
1972 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
1973 a8083063 Iustin Pop
      raise errors.OpExecError, ("Could not shutdown instance %s on node %s" %
1974 a8083063 Iustin Pop
                                 (instance.name, instance.primary_node))
1975 a8083063 Iustin Pop
1976 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
1977 a8083063 Iustin Pop
1978 a8083063 Iustin Pop
    _RemoveDisks(instance, self.cfg)
1979 a8083063 Iustin Pop
1980 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
1981 a8083063 Iustin Pop
1982 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
1983 a8083063 Iustin Pop
1984 a8083063 Iustin Pop
1985 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    """
    instance_names = utils.NiceSort(self.cfg.GetInstanceList())
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
2023 a8083063 Iustin Pop
          live_data.update(result)
2024 a8083063 Iustin Pop
        elif result == False:
2025 a8083063 Iustin Pop
          bad_nodes.append(name)
2026 a8083063 Iustin Pop
        # else no instance is alive
2027 a8083063 Iustin Pop
    else:
2028 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2029 a8083063 Iustin Pop
2030 a8083063 Iustin Pop
    # end data gathering
2031 a8083063 Iustin Pop
2032 a8083063 Iustin Pop
    output = []
2033 a8083063 Iustin Pop
    for instance in instance_list:
2034 a8083063 Iustin Pop
      iout = []
2035 a8083063 Iustin Pop
      for field in self.op.output_fields:
2036 a8083063 Iustin Pop
        if field == "name":
2037 a8083063 Iustin Pop
          val = instance.name
2038 a8083063 Iustin Pop
        elif field == "os":
2039 a8083063 Iustin Pop
          val = instance.os
2040 a8083063 Iustin Pop
        elif field == "pnode":
2041 a8083063 Iustin Pop
          val = instance.primary_node
2042 a8083063 Iustin Pop
        elif field == "snodes":
2043 a8083063 Iustin Pop
          val = ",".join(instance.secondary_nodes) or "-"
2044 a8083063 Iustin Pop
        elif field == "admin_state":
2045 a8083063 Iustin Pop
          if instance.status == "down":
2046 a8083063 Iustin Pop
            val = "no"
2047 a8083063 Iustin Pop
          else:
2048 a8083063 Iustin Pop
            val = "yes"
2049 a8083063 Iustin Pop
        elif field == "oper_state":
2050 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2051 a8083063 Iustin Pop
            val = "(node down)"
2052 a8083063 Iustin Pop
          else:
2053 a8083063 Iustin Pop
            if live_data.get(instance.name):
2054 a8083063 Iustin Pop
              val = "running"
2055 a8083063 Iustin Pop
            else:
2056 a8083063 Iustin Pop
              val = "stopped"
2057 a8083063 Iustin Pop
        elif field == "admin_ram":
2058 a8083063 Iustin Pop
          val = instance.memory
2059 a8083063 Iustin Pop
        elif field == "oper_ram":
2060 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2061 a8083063 Iustin Pop
            val = "(node down)"
2062 a8083063 Iustin Pop
          elif instance.name in live_data:
2063 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2064 a8083063 Iustin Pop
          else:
2065 a8083063 Iustin Pop
            val = "-"
2066 a8083063 Iustin Pop
        elif field == "disk_template":
2067 a8083063 Iustin Pop
          val = instance.disk_template
2068 a8083063 Iustin Pop
        elif field == "ip":
2069 a8083063 Iustin Pop
          val = instance.nics[0].ip
2070 a8083063 Iustin Pop
        elif field == "bridge":
2071 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2072 a8083063 Iustin Pop
        elif field == "mac":
2073 a8083063 Iustin Pop
          val = instance.nics[0].mac
2074 a8083063 Iustin Pop
        else:
2075 a8083063 Iustin Pop
          raise errors.ParameterError, field
2076 a8083063 Iustin Pop
        val = str(val)
2077 a8083063 Iustin Pop
        iout.append(val)
2078 a8083063 Iustin Pop
      output.append(iout)
2079 a8083063 Iustin Pop
2080 a8083063 Iustin Pop
    return output
2081 a8083063 Iustin Pop
2082 a8083063 Iustin Pop
2083 a8083063 Iustin Pop
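# Illustrative use of LUQueryInstances (a sketch, values assumed): asking
# only for static fields is answered from the configuration alone, while
# any dynamic field triggers the live RPC above, e.g.
#
#   op = opcodes.OpQueryInstances(output_fields=["name", "oper_ram"])
#   # -> [["inst1.example.com", "512"], ["inst2.example.com", "-"]]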
class LUFailoverInstance(LogicalUnit):
2084 a8083063 Iustin Pop
  """Failover an instance.
2085 a8083063 Iustin Pop

2086 a8083063 Iustin Pop
  """
2087 a8083063 Iustin Pop
  HPATH = "instance-failover"
2088 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2089 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2090 a8083063 Iustin Pop
2091 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2092 a8083063 Iustin Pop
    """Build hooks env.
2093 a8083063 Iustin Pop

2094 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2095 a8083063 Iustin Pop

2096 a8083063 Iustin Pop
    """
2097 a8083063 Iustin Pop
    env = {
2098 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2099 a8083063 Iustin Pop
      "INSTANCE_PRIMARY": self.instance.primary_node,
2100 a8083063 Iustin Pop
      "INSTANCE_SECONDARIES": " ".join(self.instance.secondary_nodes),
2101 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2102 a8083063 Iustin Pop
      }
2103 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2104 a8083063 Iustin Pop
    return env, nl, nl
2105 a8083063 Iustin Pop
2106 a8083063 Iustin Pop
  def CheckPrereq(self):
2107 a8083063 Iustin Pop
    """Check prerequisites.
2108 a8083063 Iustin Pop

2109 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2110 a8083063 Iustin Pop

2111 a8083063 Iustin Pop
    """
2112 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2113 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2114 a8083063 Iustin Pop
    if instance is None:
2115 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
2116 a8083063 Iustin Pop
                                   self.op.instance_name)
2117 a8083063 Iustin Pop
2118 3a7c308e Guido Trotter
    # check memory requirements on the secondary node
2119 3a7c308e Guido Trotter
    target_node = instance.secondary_nodes[0]
2120 3a7c308e Guido Trotter
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2121 3a7c308e Guido Trotter
    info = nodeinfo.get(target_node, None)
2122 3a7c308e Guido Trotter
    if not info:
2123 3a7c308e Guido Trotter
      raise errors.OpPrereqError, ("Cannot get current information"
2124 3a7c308e Guido Trotter
                                   " from node '%s'" % nodeinfo)
2125 3a7c308e Guido Trotter
    if instance.memory > info['memory_free']:
2126 3a7c308e Guido Trotter
      raise errors.OpPrereqError, ("Not enough memory on target node %s."
2127 3a7c308e Guido Trotter
                                   " %d MB available, %d MB required" %
2128 3a7c308e Guido Trotter
                                   (target_node, info['memory_free'],
2129 3a7c308e Guido Trotter
                                    instance.memory))
2130 3a7c308e Guido Trotter
2131 a8083063 Iustin Pop
    # check bridge existence
2132 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2133 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(target_node, brlist):
2134 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("one or more target bridges %s does not"
2135 a8083063 Iustin Pop
                                   " exist on destination node '%s'" %
2136 a8083063 Iustin Pop
                                   (brlist, target_node))
2137 a8083063 Iustin Pop
2138 a8083063 Iustin Pop
    self.instance = instance
2139 a8083063 Iustin Pop
2140 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2141 a8083063 Iustin Pop
    """Failover an instance.
2142 a8083063 Iustin Pop

2143 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2144 a8083063 Iustin Pop
    starting it on the secondary.
2145 a8083063 Iustin Pop

2146 a8083063 Iustin Pop
    """
2147 a8083063 Iustin Pop
    instance = self.instance
2148 a8083063 Iustin Pop
2149 a8083063 Iustin Pop
    source_node = instance.primary_node
2150 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2151 a8083063 Iustin Pop
2152 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2153 a8083063 Iustin Pop
    for dev in instance.disks:
2154 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2155 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2156 a8083063 Iustin Pop
        if not self.op.ignore_consistency:
2157 a8083063 Iustin Pop
          raise errors.OpExecError, ("Disk %s is degraded on target node,"
2158 a8083063 Iustin Pop
                                     " aborting failover." % dev.iv_name)
2159 a8083063 Iustin Pop
2160 a8083063 Iustin Pop
    feedback_fn("* checking target node resource availability")
2161 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2162 a8083063 Iustin Pop
2163 a8083063 Iustin Pop
    if not nodeinfo:
2164 a8083063 Iustin Pop
      raise errors.OpExecError, ("Could not contact target node %s." %
2165 a8083063 Iustin Pop
                                 target_node)
2166 a8083063 Iustin Pop
2167 a8083063 Iustin Pop
    free_memory = int(nodeinfo[target_node]['memory_free'])
2168 a8083063 Iustin Pop
    memory = instance.memory
2169 a8083063 Iustin Pop
    if memory > free_memory:
2170 a8083063 Iustin Pop
      raise errors.OpExecError, ("Not enough memory to create instance %s on"
2171 a8083063 Iustin Pop
                                 " node %s. needed %s MiB, available %s MiB" %
2172 a8083063 Iustin Pop
                                 (instance.name, target_node, memory,
2173 a8083063 Iustin Pop
                                  free_memory))
2174 a8083063 Iustin Pop
2175 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2176 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2177 a8083063 Iustin Pop
                (instance.name, source_node))
2178 a8083063 Iustin Pop
2179 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2180 a8083063 Iustin Pop
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2181 a8083063 Iustin Pop
                   " anyway. Please make sure node %s is down"  %
2182 a8083063 Iustin Pop
                   (instance.name, source_node, source_node))
2183 a8083063 Iustin Pop
2184 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2185 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2186 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't shut down the instance's disks.")
2187 a8083063 Iustin Pop
2188 a8083063 Iustin Pop
    instance.primary_node = target_node
2189 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2190 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2191 a8083063 Iustin Pop
2192 a8083063 Iustin Pop
    feedback_fn("* activating the instance's disks on target node")
2193 a8083063 Iustin Pop
    logger.Info("Starting instance %s on node %s" %
2194 a8083063 Iustin Pop
                (instance.name, target_node))
2195 a8083063 Iustin Pop
2196 a8083063 Iustin Pop
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2197 a8083063 Iustin Pop
                                             ignore_secondaries=True)
2198 a8083063 Iustin Pop
    if not disks_ok:
2199 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2200 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't activate the instance's disks")
2201 a8083063 Iustin Pop
2202 a8083063 Iustin Pop
    feedback_fn("* starting the instance on the target node")
2203 a8083063 Iustin Pop
    if not rpc.call_instance_start(target_node, instance, None):
2204 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2205 a8083063 Iustin Pop
      raise errors.OpExecError("Could not start instance %s on node %s." %
2206 d0b3526f Michael Hanselmann
                               (instance.name, target_node))
2207 a8083063 Iustin Pop
2208 a8083063 Iustin Pop
2209 a8083063 Iustin Pop
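# Illustrative sketch (values assumed): a failover request carries the two
# parameters named in _OP_REQP, e.g.
#
#   op = opcodes.OpFailoverInstance(instance_name="inst1.example.com",
#                                   ignore_consistency=False)
#
# after which LUFailoverInstance shuts the instance down on the primary,
# deactivates its disks there, updates the configuration and starts the
# instance on the secondary, in that order.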
def _CreateBlockDevOnPrimary(cfg, node, device):
2210 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2211 a8083063 Iustin Pop

2212 a8083063 Iustin Pop
  This always creates all devices.
2213 a8083063 Iustin Pop

2214 a8083063 Iustin Pop
  """
2215 a8083063 Iustin Pop
  if device.children:
2216 a8083063 Iustin Pop
    for child in device.children:
2217 a8083063 Iustin Pop
      if not _CreateBlockDevOnPrimary(cfg, node, child):
2218 a8083063 Iustin Pop
        return False
2219 a8083063 Iustin Pop
2220 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2221 a8083063 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size, True)
2222 a8083063 Iustin Pop
  if not new_id:
2223 a8083063 Iustin Pop
    return False
2224 a8083063 Iustin Pop
  if device.physical_id is None:
2225 a8083063 Iustin Pop
    device.physical_id = new_id
2226 a8083063 Iustin Pop
  return True
2227 a8083063 Iustin Pop
2228 a8083063 Iustin Pop
2229 a8083063 Iustin Pop
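# The recursion in _CreateBlockDevOnPrimary is leaves-first; for an
# md_raid1 device with two lvm children (names illustrative):
#
#   _CreateBlockDevOnPrimary(cfg, node, md_dev)
#     -> _CreateBlockDevOnPrimary(cfg, node, lv_m1)   # leaves created first
#     -> _CreateBlockDevOnPrimary(cfg, node, lv_m2)
#     -> rpc.call_blockdev_create(node, md_dev, md_dev.size, True)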
def _CreateBlockDevOnSecondary(cfg, node, device, force):
2230 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2231 a8083063 Iustin Pop

2232 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2233 a8083063 Iustin Pop
  all its children.
2234 a8083063 Iustin Pop

2235 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2236 a8083063 Iustin Pop

2237 a8083063 Iustin Pop
  """
2238 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2239 a8083063 Iustin Pop
    force = True
2240 a8083063 Iustin Pop
  if device.children:
2241 a8083063 Iustin Pop
    for child in device.children:
2242 a8083063 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, node, child, force):
2243 a8083063 Iustin Pop
        return False
2244 a8083063 Iustin Pop
2245 a8083063 Iustin Pop
  if not force:
2246 a8083063 Iustin Pop
    return True
2247 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2248 a8083063 Iustin Pop
  new_id = rpc.call_blockdev_create(node, device, device.size, False)
2249 a8083063 Iustin Pop
  if not new_id:
2250 a8083063 Iustin Pop
    return False
2251 a8083063 Iustin Pop
  if device.physical_id is None:
2252 a8083063 Iustin Pop
    device.physical_id = new_id
2253 a8083063 Iustin Pop
  return True
2254 a8083063 Iustin Pop
2255 a8083063 Iustin Pop
2256 a8083063 Iustin Pop
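# Illustrative walk of _CreateBlockDevOnSecondary over a remote_raid1 tree
# (md_raid1 over drbd over two LVs), assuming CreateOnSecondary() is true
# only for the drbd device in this tree: called with force=False, it skips
# creating the top-level md device, but the drbd child flips force to
# True, so the drbd device and its backing LVs do get created on the
# secondary node.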
def _GenerateMDDRBDBranch(cfg, vgname, primary, secondary, size, base):
2257 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
2258 a8083063 Iustin Pop

2259 a8083063 Iustin Pop
  """
2260 a8083063 Iustin Pop
  port = cfg.AllocatePort()
2261 a8083063 Iustin Pop
  base = "%s_%s" % (base, port)
2262 a8083063 Iustin Pop
  dev_data = objects.Disk(dev_type="lvm", size=size,
2263 a8083063 Iustin Pop
                          logical_id=(vgname, "%s.data" % base))
2264 a8083063 Iustin Pop
  dev_meta = objects.Disk(dev_type="lvm", size=128,
2265 a8083063 Iustin Pop
                          logical_id=(vgname, "%s.meta" % base))
2266 a8083063 Iustin Pop
  drbd_dev = objects.Disk(dev_type="drbd", size=size,
2267 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
2268 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
2269 a8083063 Iustin Pop
  return drbd_dev
2270 a8083063 Iustin Pop
2271 a8083063 Iustin Pop
2272 a8083063 Iustin Pop
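# The branch returned by _GenerateMDDRBDBranch (sizes illustrative, port
# allocated from the cluster) is:
#
#   drbd, logical_id=(primary, secondary, port), size=size
#     - lvm "<base>_<port>.data", size=size
#     - lvm "<base>_<port>.meta", size=128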
def _GenerateDiskTemplate(cfg, vgname, template_name,
2273 a8083063 Iustin Pop
                          instance_name, primary_node,
2274 a8083063 Iustin Pop
                          secondary_nodes, disk_sz, swap_sz):
2275 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2276 a8083063 Iustin Pop

2277 a8083063 Iustin Pop
  """
2278 a8083063 Iustin Pop
  #TODO: compute space requirements
2279 a8083063 Iustin Pop
2280 a8083063 Iustin Pop
  if template_name == "diskless":
2281 a8083063 Iustin Pop
    disks = []
2282 a8083063 Iustin Pop
  elif template_name == "plain":
2283 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2284 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2285 a8083063 Iustin Pop
    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
2286 a8083063 Iustin Pop
                           logical_id=(vgname, "%s.os" % instance_name),
2287 a8083063 Iustin Pop
                           iv_name = "sda")
2288 a8083063 Iustin Pop
    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
2289 a8083063 Iustin Pop
                           logical_id=(vgname, "%s.swap" % instance_name),
2290 a8083063 Iustin Pop
                           iv_name = "sdb")
2291 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2292 a8083063 Iustin Pop
  elif template_name == "local_raid1":
2293 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2294 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2295 a8083063 Iustin Pop
    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
2296 a8083063 Iustin Pop
                              logical_id=(vgname, "%s.os_m1" % instance_name))
2297 a8083063 Iustin Pop
    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
2298 a8083063 Iustin Pop
                              logical_id=(vgname, "%s.os_m2" % instance_name))
2299 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
2300 a8083063 Iustin Pop
                              size=disk_sz,
2301 a8083063 Iustin Pop
                              children = [sda_dev_m1, sda_dev_m2])
2302 a8083063 Iustin Pop
    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
2303 a8083063 Iustin Pop
                              logical_id=(vgname, "%s.swap_m1" %
2304 a8083063 Iustin Pop
                                          instance_name))
2305 a8083063 Iustin Pop
    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
2306 a8083063 Iustin Pop
                              logical_id=(vgname, "%s.swap_m2" %
2307 a8083063 Iustin Pop
                                          instance_name))
2308 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
2309 a8083063 Iustin Pop
                              size=swap_sz,
2310 a8083063 Iustin Pop
                              children = [sdb_dev_m1, sdb_dev_m2])
2311 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2312 a8083063 Iustin Pop
  elif template_name == "remote_raid1":
2313 a8083063 Iustin Pop
    if len(secondary_nodes) != 1:
2314 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2315 a8083063 Iustin Pop
    remote_node = secondary_nodes[0]
2316 a8083063 Iustin Pop
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, vgname,
2317 a8083063 Iustin Pop
                                         primary_node, remote_node, disk_sz,
2318 a8083063 Iustin Pop
                                         "%s-sda" % instance_name)
2319 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
2320 a8083063 Iustin Pop
                              children = [drbd_sda_dev], size=disk_sz)
2321 a8083063 Iustin Pop
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, vgname,
2322 a8083063 Iustin Pop
                                         primary_node, remote_node, swap_sz,
2323 a8083063 Iustin Pop
                                         "%s-sdb" % instance_name)
2324 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
2325 a8083063 Iustin Pop
                              children = [drbd_sdb_dev], size=swap_sz)
2326 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2327 a8083063 Iustin Pop
  else:
2328 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2329 a8083063 Iustin Pop
  return disks
2330 a8083063 Iustin Pop
2331 a8083063 Iustin Pop
2332 a8083063 Iustin Pop
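# Illustrative call of _GenerateDiskTemplate (node and VG names assumed):
# a remote_raid1 layout for a 10 GB disk and 4 GB swap yields two md_raid1
# devices ("sda" and "sdb"), each over one drbd branch as built above:
#
#   disks = _GenerateDiskTemplate(cfg, "xenvg", "remote_raid1",
#                                 "inst1.example.com", "node1", ["node2"],
#                                 10240, 4096)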
def _CreateDisks(cfg, instance):
2333 a8083063 Iustin Pop
  """Create all disks for an instance.
2334 a8083063 Iustin Pop

2335 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2336 a8083063 Iustin Pop

2337 a8083063 Iustin Pop
  Args:
2338 a8083063 Iustin Pop
    instance: the instance object
2339 a8083063 Iustin Pop

2340 a8083063 Iustin Pop
  Returns:
2341 a8083063 Iustin Pop
    True or False showing the success of the creation process
2342 a8083063 Iustin Pop

2343 a8083063 Iustin Pop
  """
2344 a8083063 Iustin Pop
  for device in instance.disks:
2345 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2346 a8083063 Iustin Pop
                (device.iv_name, instance.name))
2347 a8083063 Iustin Pop
    #HARDCODE
2348 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2349 a8083063 Iustin Pop
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False):
2350 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2351 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2352 a8083063 Iustin Pop
        return False
2353 a8083063 Iustin Pop
    #HARDCODE
2354 a8083063 Iustin Pop
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device):
2355 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2356 a8083063 Iustin Pop
                   device.iv_name)
2357 a8083063 Iustin Pop
      return False
2358 a8083063 Iustin Pop
  return True
2359 a8083063 Iustin Pop
2360 a8083063 Iustin Pop
2361 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2362 a8083063 Iustin Pop
  """Remove all disks for an instance.
2363 a8083063 Iustin Pop

2364 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2365 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2366 a8083063 Iustin Pop
  be removed, the removal will continue with the other ones (compare
2367 a8083063 Iustin Pop
  with `_CreateDisks()`).
2368 a8083063 Iustin Pop

2369 a8083063 Iustin Pop
  Args:
2370 a8083063 Iustin Pop
    instance: the instance object
2371 a8083063 Iustin Pop

2372 a8083063 Iustin Pop
  Returns:
2373 a8083063 Iustin Pop
    True or False showing the success of the removal process
2374 a8083063 Iustin Pop

2375 a8083063 Iustin Pop
  """
2376 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2377 a8083063 Iustin Pop
2378 a8083063 Iustin Pop
  result = True
2379 a8083063 Iustin Pop
  for device in instance.disks:
2380 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2381 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2382 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2383 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2384 a8083063 Iustin Pop
                     " continuing anyway" %
2385 a8083063 Iustin Pop
                     (device.iv_name, node))
2386 a8083063 Iustin Pop
        result = False
2387 a8083063 Iustin Pop
  return result
2388 a8083063 Iustin Pop
2389 a8083063 Iustin Pop
2390 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2391 a8083063 Iustin Pop
  """Create an instance.
2392 a8083063 Iustin Pop

2393 a8083063 Iustin Pop
  """
2394 a8083063 Iustin Pop
  HPATH = "instance-add"
2395 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2396 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
2397 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2398 a8083063 Iustin Pop
              "wait_for_sync"]
2399 a8083063 Iustin Pop
2400 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2401 a8083063 Iustin Pop
    """Build hooks env.
2402 a8083063 Iustin Pop

2403 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2404 a8083063 Iustin Pop

2405 a8083063 Iustin Pop
    """
2406 a8083063 Iustin Pop
    env = {
2407 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2408 a8083063 Iustin Pop
      "INSTANCE_PRIMARY": self.op.pnode,
2409 a8083063 Iustin Pop
      "INSTANCE_SECONDARIES": " ".join(self.secondaries),
2410 a8083063 Iustin Pop
      "DISK_TEMPLATE": self.op.disk_template,
2411 a8083063 Iustin Pop
      "MEM_SIZE": self.op.mem_size,
2412 a8083063 Iustin Pop
      "DISK_SIZE": self.op.disk_size,
2413 a8083063 Iustin Pop
      "SWAP_SIZE": self.op.swap_size,
2414 a8083063 Iustin Pop
      "VCPUS": self.op.vcpus,
2415 a8083063 Iustin Pop
      "BRIDGE": self.op.bridge,
2416 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2417 a8083063 Iustin Pop
      }
2418 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2419 a8083063 Iustin Pop
      env["SRC_NODE"] = self.op.src_node
2420 a8083063 Iustin Pop
      env["SRC_PATH"] = self.op.src_path
2421 a8083063 Iustin Pop
      env["SRC_IMAGE"] = self.src_image
2422 a8083063 Iustin Pop
    if self.inst_ip:
2423 a8083063 Iustin Pop
      env["INSTANCE_IP"] = self.inst_ip
2424 a8083063 Iustin Pop
2425 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2426 a8083063 Iustin Pop
          self.secondaries)
2427 a8083063 Iustin Pop
    return env, nl, nl
2428 a8083063 Iustin Pop
2429 a8083063 Iustin Pop
2430 a8083063 Iustin Pop
  def CheckPrereq(self):
2431 a8083063 Iustin Pop
    """Check prerequisites.
2432 a8083063 Iustin Pop

2433 a8083063 Iustin Pop
    """
2434 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2435 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2436 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Invalid instance creation mode '%s'" %
2437 a8083063 Iustin Pop
                                   self.op.mode)
2438 a8083063 Iustin Pop
2439 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2440 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2441 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2442 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2443 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Importing an instance requires source"
2444 a8083063 Iustin Pop
                                     " node and path options")
2445 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2446 a8083063 Iustin Pop
      if src_node_full is None:
2447 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Unknown source node '%s'" % src_node)
2448 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2449 a8083063 Iustin Pop
2450 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2451 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("The source path must be absolute")
2452 a8083063 Iustin Pop
2453 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2454 a8083063 Iustin Pop
2455 a8083063 Iustin Pop
      if not export_info:
2456 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("No export found in dir %s" % src_path)
2457 a8083063 Iustin Pop
2458 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2459 a8083063 Iustin Pop
        raise errors.ProgrammerError, ("Corrupted export config")
2460 a8083063 Iustin Pop
2461 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
2462 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
2463 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Wrong export version %s (wanted %d)" %
2464 a8083063 Iustin Pop
                                     (ei_version, constants.EXPORT_VERSION))
2465 a8083063 Iustin Pop
2466 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
2467 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Can't import instance with more than"
2468 a8083063 Iustin Pop
                                     " one data disk")
2469 a8083063 Iustin Pop
2470 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
2471 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
2472 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
2473 a8083063 Iustin Pop
                                                         'disk0_dump'))
2474 a8083063 Iustin Pop
      self.src_image = diskimage
2475 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
2476 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
2477 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("No guest OS specified")
2478 a8083063 Iustin Pop
2479 a8083063 Iustin Pop
    # check primary node
2480 a8083063 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
2481 a8083063 Iustin Pop
    if pnode is None:
2482 fe7b0351 Michael Hanselmann
      raise errors.OpPrereqError, ("Primary node '%s' is unknown" %
2483 a8083063 Iustin Pop
                                   self.op.pnode)
2484 a8083063 Iustin Pop
    self.op.pnode = pnode.name
2485 a8083063 Iustin Pop
    self.pnode = pnode
2486 a8083063 Iustin Pop
    self.secondaries = []
2487 a8083063 Iustin Pop
    # disk template and mirror node verification
2488 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
2489 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Invalid disk template name")
2490 a8083063 Iustin Pop
2491 a8083063 Iustin Pop
    if self.op.disk_template == constants.DT_REMOTE_RAID1:
2492 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
2493 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("The 'remote_raid1' disk template needs"
2494 a8083063 Iustin Pop
                                     " a mirror node")
2495 a8083063 Iustin Pop
2496 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
2497 a8083063 Iustin Pop
      if snode_name is None:
2498 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Unknown secondary node '%s'" %
2499 a8083063 Iustin Pop
                                     self.op.snode)
2500 a8083063 Iustin Pop
      elif snode_name == pnode.name:
2501 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("The secondary node cannot be"
2502 a8083063 Iustin Pop
                                     " the primary node.")
2503 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
2504 a8083063 Iustin Pop
2505 ed1ebc60 Guido Trotter
    # Check lv size requirements
2506 ed1ebc60 Guido Trotter
    nodenames = [pnode.name] + self.secondaries
2507 ed1ebc60 Guido Trotter
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
2508 ed1ebc60 Guido Trotter
2509 ed1ebc60 Guido Trotter
    # Required free disk space as a function of disk and swap space
2510 ed1ebc60 Guido Trotter
    req_size_dict = {
2511 ed1ebc60 Guido Trotter
      constants.DT_DISKLESS: 0,
2512 ed1ebc60 Guido Trotter
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
2513 ed1ebc60 Guido Trotter
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
2514 ed1ebc60 Guido Trotter
      # 256 MB are added for drbd metadata, 128MB for each drbd device
2515 ed1ebc60 Guido Trotter
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
2516 ed1ebc60 Guido Trotter
    }
2517 ed1ebc60 Guido Trotter
2518 ed1ebc60 Guido Trotter
    if self.op.disk_template not in req_size_dict:
2519 ed1ebc60 Guido Trotter
      raise errors.ProgrammerError, ("Disk template '%s' size requirement"
2520 ed1ebc60 Guido Trotter
                                     " is unknown" %  self.op.disk_template)
2521 ed1ebc60 Guido Trotter
2522 ed1ebc60 Guido Trotter
    req_size = req_size_dict[self.op.disk_template]
2523 ed1ebc60 Guido Trotter
2524 ed1ebc60 Guido Trotter
    for node in nodenames:
2525 ed1ebc60 Guido Trotter
      info = nodeinfo.get(node, None)
2526 ed1ebc60 Guido Trotter
      if not info:
2527 ed1ebc60 Guido Trotter
        raise errors.OpPrereqError, ("Cannot get current information"
2528 ed1ebc60 Guido Trotter
                                     " from node '%s'" % nodeinfo)
2529 ed1ebc60 Guido Trotter
      if req_size > info['vg_free']:
2530 ed1ebc60 Guido Trotter
        raise errors.OpPrereqError, ("Not enough disk space on target node %s."
2531 ed1ebc60 Guido Trotter
                                     " %d MB available, %d MB required" %
2532 ed1ebc60 Guido Trotter
                                     (node, info['vg_free'], req_size))
2533 ed1ebc60 Guido Trotter
2534 a8083063 Iustin Pop
    # os verification
2535 a8083063 Iustin Pop
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2536 a8083063 Iustin Pop
    if not isinstance(os_obj, objects.OS):
2537 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("OS '%s' not in supported os list for"
2538 a8083063 Iustin Pop
                                   " primary node"  % self.op.os_type)
2539 a8083063 Iustin Pop
2540 a8083063 Iustin Pop
    # instance verification
2541 a8083063 Iustin Pop
    hostname1 = utils.LookupHostname(self.op.instance_name)
2542 a8083063 Iustin Pop
    if not hostname1:
2543 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance name '%s' not found in dns" %
2544 a8083063 Iustin Pop
                                   self.op.instance_name)
2545 a8083063 Iustin Pop
2546 a8083063 Iustin Pop
    self.op.instance_name = instance_name = hostname1['hostname']
2547 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2548 a8083063 Iustin Pop
    if instance_name in instance_list:
2549 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' is already in the cluster" %
2550 a8083063 Iustin Pop
                                   instance_name)
2551 a8083063 Iustin Pop
2552 a8083063 Iustin Pop
    ip = getattr(self.op, "ip", None)
2553 a8083063 Iustin Pop
    if ip is None or ip.lower() == "none":
2554 a8083063 Iustin Pop
      inst_ip = None
2555 a8083063 Iustin Pop
    elif ip.lower() == "auto":
2556 a8083063 Iustin Pop
      inst_ip = hostname1['ip']
2557 a8083063 Iustin Pop
    else:
2558 a8083063 Iustin Pop
      if not utils.IsValidIP(ip):
2559 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("given IP address '%s' doesn't look"
2560 a8083063 Iustin Pop
                                     " like a valid IP" % ip)
2561 a8083063 Iustin Pop
      inst_ip = ip
2562 a8083063 Iustin Pop
    self.inst_ip = inst_ip
2563 a8083063 Iustin Pop
2564 a8083063 Iustin Pop
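    # the candidate IP must not be live yet: a successful fping (i.e.
    # result.failed being false) means some host already answers on it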
    command = ["fping", "-q", hostname1['ip']]
2565 a8083063 Iustin Pop
    result = utils.RunCmd(command)
2566 a8083063 Iustin Pop
    if not result.failed:
2567 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("IP %s of instance %s already in use" %
2568 a8083063 Iustin Pop
                                   (hostname1['ip'], instance_name))
2569 a8083063 Iustin Pop
2570 a8083063 Iustin Pop
    # bridge verification
2571 a8083063 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
2572 a8083063 Iustin Pop
    if bridge is None:
2573 a8083063 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
2574 a8083063 Iustin Pop
    else:
2575 a8083063 Iustin Pop
      self.op.bridge = bridge
2576 a8083063 Iustin Pop
2577 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
2578 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("target bridge '%s' does not exist on"
2579 a8083063 Iustin Pop
                                   " destination node '%s'" %
2580 a8083063 Iustin Pop
                                   (self.op.bridge, pnode.name))
2581 a8083063 Iustin Pop
2582 a8083063 Iustin Pop
    if self.op.start:
2583 a8083063 Iustin Pop
      self.instance_status = 'up'
2584 a8083063 Iustin Pop
    else:
2585 a8083063 Iustin Pop
      self.instance_status = 'down'
2586 a8083063 Iustin Pop
2587 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2588 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
2589 a8083063 Iustin Pop

2590 a8083063 Iustin Pop
    """
2591 a8083063 Iustin Pop
    instance = self.op.instance_name
2592 a8083063 Iustin Pop
    pnode_name = self.pnode.name
2593 a8083063 Iustin Pop
2594 a8083063 Iustin Pop
    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
2595 a8083063 Iustin Pop
    if self.inst_ip is not None:
2596 a8083063 Iustin Pop
      nic.ip = self.inst_ip
2597 a8083063 Iustin Pop
2598 a8083063 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg, self.cfg.GetVGName(),
2599 a8083063 Iustin Pop
                                  self.op.disk_template,
2600 a8083063 Iustin Pop
                                  instance, pnode_name,
2601 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
2602 a8083063 Iustin Pop
                                  self.op.swap_size)
2603 a8083063 Iustin Pop
2604 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
2605 a8083063 Iustin Pop
                            primary_node=pnode_name,
2606 a8083063 Iustin Pop
                            memory=self.op.mem_size,
2607 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
2608 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
2609 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
2610 a8083063 Iustin Pop
                            status=self.instance_status,
2611 a8083063 Iustin Pop
                            )
2612 a8083063 Iustin Pop
2613 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
2614 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
2615 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2616 a8083063 Iustin Pop
      raise errors.OpExecError, ("Device creation failed, reverting...")
2617 a8083063 Iustin Pop
2618 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
2619 a8083063 Iustin Pop
2620 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
2621 a8083063 Iustin Pop
2622 a8083063 Iustin Pop
    if self.op.wait_for_sync:
2623 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj)
2624 a8083063 Iustin Pop
    elif iobj.disk_template == "remote_raid1":
2625 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
2626 a8083063 Iustin Pop
      time.sleep(15)
2627 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
2628 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
2629 a8083063 Iustin Pop
    else:
2630 a8083063 Iustin Pop
      disk_abort = False
2631 a8083063 Iustin Pop
2632 a8083063 Iustin Pop
    if disk_abort:
2633 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2634 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
2635 a8083063 Iustin Pop
      raise errors.OpExecError, ("There are some degraded disks for"
2636 a8083063 Iustin Pop
                                      " this instance")
2637 a8083063 Iustin Pop
2638 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
2639 a8083063 Iustin Pop
                (instance, pnode_name))
2640 a8083063 Iustin Pop
2641 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
2642 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
2643 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
2644 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
2645 a8083063 Iustin Pop
          raise errors.OpExecError, ("could not add os for instance %s"
2646 a8083063 Iustin Pop
                                          " on node %s" %
2647 a8083063 Iustin Pop
                                     (instance, pnode_name))
2648 a8083063 Iustin Pop
2649 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
2650 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
2651 a8083063 Iustin Pop
        src_node = self.op.src_node
2652 a8083063 Iustin Pop
        src_image = self.src_image
2653 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
2654 a8083063 Iustin Pop
                                           src_node, src_image):
2655 a8083063 Iustin Pop
          raise errors.OpExecError, ("Could not import os for instance"
2656 a8083063 Iustin Pop
                                          " %s on node %s" %
2657 a8083063 Iustin Pop
                                     (instance, pnode_name))
2658 a8083063 Iustin Pop
      else:
2659 a8083063 Iustin Pop
        # also checked in the prereq part
2660 a8083063 Iustin Pop
        raise errors.ProgrammerError, ("Unknown OS initialization mode '%s'"
2661 a8083063 Iustin Pop
                                       % self.op.mode)
2662 a8083063 Iustin Pop
2663 a8083063 Iustin Pop
    if self.op.start:
2664 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
2665 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
2666 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
2667 a8083063 Iustin Pop
        raise errors.OpExecError, ("Could not start instance")
2668 a8083063 Iustin Pop
2669 a8083063 Iustin Pop
2670 a8083063 Iustin Pop
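# Illustrative sketch (values assumed, optional fields such as ip and
# bridge omitted): the opcode driving this LU carries every name listed
# in _OP_REQP, plus os_type when creating from scratch, e.g.
#
#   op = opcodes.OpCreateInstance(instance_name="inst1.example.com",
#                                 mem_size=512, disk_size=10240,
#                                 swap_size=4096, vcpus=1, pnode="node1",
#                                 disk_template="plain", os_type="debian",
#                                 mode=constants.INSTANCE_CREATE,
#                                 start=True, wait_for_sync=True)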
class LUConnectConsole(NoHooksLU):
2671 a8083063 Iustin Pop
  """Connect to an instance's console.
2672 a8083063 Iustin Pop

2673 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
2674 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
2675 a8083063 Iustin Pop
  console.
2676 a8083063 Iustin Pop

2677 a8083063 Iustin Pop
  """
2678 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2679 a8083063 Iustin Pop
2680 a8083063 Iustin Pop
  def CheckPrereq(self):
2681 a8083063 Iustin Pop
    """Check prerequisites.
2682 a8083063 Iustin Pop

2683 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2684 a8083063 Iustin Pop

2685 a8083063 Iustin Pop
    """
2686 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2687 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2688 a8083063 Iustin Pop
    if instance is None:
2689 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
2690 a8083063 Iustin Pop
                                   self.op.instance_name)
2691 a8083063 Iustin Pop
    self.instance = instance
2692 a8083063 Iustin Pop
2693 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2694 a8083063 Iustin Pop
    """Connect to the console of an instance
2695 a8083063 Iustin Pop

2696 a8083063 Iustin Pop
    """
2697 a8083063 Iustin Pop
    instance = self.instance
2698 a8083063 Iustin Pop
    node = instance.primary_node
2699 a8083063 Iustin Pop
2700 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
2701 a8083063 Iustin Pop
    if node_insts is False:
2702 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't connect to node %s." % node)
2703 a8083063 Iustin Pop
2704 a8083063 Iustin Pop
    if instance.name not in node_insts:
2705 a8083063 Iustin Pop
      raise errors.OpExecError, ("Instance %s is not running." % instance.name)
2706 a8083063 Iustin Pop
2707 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
2708 a8083063 Iustin Pop
2709 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
2710 a8083063 Iustin Pop
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
2711 a8083063 Iustin Pop
    return node, console_cmd
2712 a8083063 Iustin Pop
2713 a8083063 Iustin Pop
2714 a8083063 Iustin Pop
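# Illustrative: the caller (e.g. the gnt-instance script) is expected to
# execute the returned console_cmd itself, typically via an interactive
# ssh session to the returned node; the LU only computes the command.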
class LUAddMDDRBDComponent(LogicalUnit):
2715 a8083063 Iustin Pop
  """Adda new mirror member to an instance's disk.
2716 a8083063 Iustin Pop

2717 a8083063 Iustin Pop
  """
2718 a8083063 Iustin Pop
  HPATH = "mirror-add"
2719 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2720 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
2721 a8083063 Iustin Pop
2722 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2723 a8083063 Iustin Pop
    """Build hooks env.
2724 a8083063 Iustin Pop

2725 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2726 a8083063 Iustin Pop

2727 a8083063 Iustin Pop
    """
2728 a8083063 Iustin Pop
    env = {
2729 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2730 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
2731 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
2732 a8083063 Iustin Pop
      }
2733 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
2734 a8083063 Iustin Pop
          self.op.remote_node,] + list(self.instance.secondary_nodes)
2735 a8083063 Iustin Pop
    return env, nl, nl
2736 a8083063 Iustin Pop
2737 a8083063 Iustin Pop
  def CheckPrereq(self):
2738 a8083063 Iustin Pop
    """Check prerequisites.
2739 a8083063 Iustin Pop

2740 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2741 a8083063 Iustin Pop

2742 a8083063 Iustin Pop
    """
2743 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2744 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2745 a8083063 Iustin Pop
    if instance is None:
2746 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
2747 a8083063 Iustin Pop
                                   self.op.instance_name)
2748 a8083063 Iustin Pop
    self.instance = instance
2749 a8083063 Iustin Pop
2750 a8083063 Iustin Pop
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
2751 a8083063 Iustin Pop
    if remote_node is None:
2752 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Node '%s' not known" % self.op.remote_node)
2753 a8083063 Iustin Pop
    self.remote_node = remote_node
2754 a8083063 Iustin Pop
2755 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
2756 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("The specified node is the primary node of"
2757 a8083063 Iustin Pop
                                   " the instance.")
2758 a8083063 Iustin Pop
2759 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2760 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance's disk layout is not"
2761 a8083063 Iustin Pop
                                   " remote_raid1.")
2762 a8083063 Iustin Pop
    for disk in instance.disks:
2763 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
2764 a8083063 Iustin Pop
        break
2765 a8083063 Iustin Pop
    else:
2766 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Can't find this device ('%s') in the"
2767 a8083063 Iustin Pop
                                   " instance." % self.op.disk_name)
2768 a8083063 Iustin Pop
    if len(disk.children) > 1:
2769 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("The device already has two slave"
2770 a8083063 Iustin Pop
                                   " devices.\n"
2771 a8083063 Iustin Pop
                                   "This would create a 3-disk raid1"
2772 a8083063 Iustin Pop
                                   " which we don't allow.")
2773 a8083063 Iustin Pop
    self.disk = disk
2774 a8083063 Iustin Pop
2775 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2776 a8083063 Iustin Pop
    """Add the mirror component
2777 a8083063 Iustin Pop

2778 a8083063 Iustin Pop
    """
2779 a8083063 Iustin Pop
    disk = self.disk
2780 a8083063 Iustin Pop
    instance = self.instance
2781 a8083063 Iustin Pop
2782 a8083063 Iustin Pop
    remote_node = self.remote_node
2783 72d6c464 Michael Hanselmann
    new_drbd = _GenerateMDDRBDBranch(self.cfg, self.cfg.GetVGName(),
2784 72d6c464 Michael Hanselmann
                                     instance.primary_node, remote_node,
2785 72d6c464 Michael Hanselmann
                                     disk.size, "%s-%s" %
2786 a8083063 Iustin Pop
                                     (instance.name, self.op.disk_name))
2787 a8083063 Iustin Pop
2788 a8083063 Iustin Pop
    logger.Info("adding new mirror component on secondary")
2789 a8083063 Iustin Pop
    #HARDCODE
2790 a8083063 Iustin Pop
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False):
2791 a8083063 Iustin Pop
      raise errors.OpExecError, ("Failed to create new component on secondary"
2792 a8083063 Iustin Pop
                                 " node %s" % remote_node)
2793 a8083063 Iustin Pop
2794 a8083063 Iustin Pop
    logger.Info("adding new mirror component on primary")
2795 a8083063 Iustin Pop
    #HARDCODE
2796 a8083063 Iustin Pop
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd):
2797 a8083063 Iustin Pop
      # remove secondary dev
2798 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2799 a8083063 Iustin Pop
      rpc.call_blockdev_remove(remote_node, new_drbd)
2800 a8083063 Iustin Pop
      raise errors.OpExecError, ("Failed to create volume on primary")
2801 a8083063 Iustin Pop
2802 a8083063 Iustin Pop
    # the device exists now
2803 a8083063 Iustin Pop
    # call the primary node to add the mirror to md
2804 a8083063 Iustin Pop
    logger.Info("adding new mirror component to md")
2805 a8083063 Iustin Pop
    if not rpc.call_blockdev_addchild(instance.primary_node,
2806 a8083063 Iustin Pop
                                           disk, new_drbd):
2807 a8083063 Iustin Pop
      logger.Error("Can't add mirror compoment to md!")
2808 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
2809 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
2810 a8083063 Iustin Pop
        logger.Error("Can't rollback on secondary")
2811 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
2812 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
2813 a8083063 Iustin Pop
        logger.Error("Can't rollback on primary")
2814 a8083063 Iustin Pop
      raise errors.OpExecError, "Can't add mirror component to md array"
2815 a8083063 Iustin Pop
2816 a8083063 Iustin Pop
    disk.children.append(new_drbd)
2817 a8083063 Iustin Pop
2818 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2819 a8083063 Iustin Pop
2820 a8083063 Iustin Pop
    _WaitForSync(self.cfg, instance)
2821 a8083063 Iustin Pop
2822 a8083063 Iustin Pop
    return 0
2823 a8083063 Iustin Pop
2824 a8083063 Iustin Pop
2825 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
2826 a8083063 Iustin Pop
  """Remove a component from a remote_raid1 disk.
2827 a8083063 Iustin Pop

2828 a8083063 Iustin Pop
  """
2829 a8083063 Iustin Pop
  HPATH = "mirror-remove"
2830 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2831 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]
2832 a8083063 Iustin Pop
2833 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2834 a8083063 Iustin Pop
    """Build hooks env.
2835 a8083063 Iustin Pop

2836 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2837 a8083063 Iustin Pop

2838 a8083063 Iustin Pop
    """
2839 a8083063 Iustin Pop
    env = {
2840 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2841 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
2842 a8083063 Iustin Pop
      "DISK_ID": self.op.disk_id,
2843 a8083063 Iustin Pop
      "OLD_SECONDARY": self.old_secondary,
2844 a8083063 Iustin Pop
      }
2845 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
2846 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
2847 a8083063 Iustin Pop
    return env, nl, nl
2848 a8083063 Iustin Pop
2849 a8083063 Iustin Pop
  def CheckPrereq(self):
2850 a8083063 Iustin Pop
    """Check prerequisites.
2851 a8083063 Iustin Pop

2852 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2853 a8083063 Iustin Pop

2854 a8083063 Iustin Pop
    """
2855 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2856 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2857 a8083063 Iustin Pop
    if instance is None:
2858 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
2859 a8083063 Iustin Pop
                                   self.op.instance_name)
2860 a8083063 Iustin Pop
    self.instance = instance
2861 a8083063 Iustin Pop
2862 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2863 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance's disk layout is not"
2864 a8083063 Iustin Pop
                                   " remote_raid1.")
2865 a8083063 Iustin Pop
    for disk in instance.disks:
2866 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
2867 a8083063 Iustin Pop
        break
2868 a8083063 Iustin Pop
    else:
2869 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Can't find this device ('%s') in the"
2870 a8083063 Iustin Pop
                                   " instance." % self.op.disk_name)
2871 a8083063 Iustin Pop
    for child in disk.children:
2872 a8083063 Iustin Pop
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
2873 a8083063 Iustin Pop
        break
2874 a8083063 Iustin Pop
    else:
2875 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Can't find the device with this port.")
2876 a8083063 Iustin Pop
2877 a8083063 Iustin Pop
    if len(disk.children) < 2:
2878 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Cannot remove the last component from"
2879 a8083063 Iustin Pop
                                   " a mirror.")
2880 a8083063 Iustin Pop
    self.disk = disk
2881 a8083063 Iustin Pop
    self.child = child
2882 a8083063 Iustin Pop
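    # logical_id is (node_a, node_b, port); whichever end is not the
    # primary node is the secondary that this removal detaches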
    if self.child.logical_id[0] == instance.primary_node:
2883 a8083063 Iustin Pop
      oid = 1
2884 a8083063 Iustin Pop
    else:
2885 a8083063 Iustin Pop
      oid = 0
2886 a8083063 Iustin Pop
    self.old_secondary = self.child.logical_id[oid]
2887 a8083063 Iustin Pop
2888 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2889 a8083063 Iustin Pop
    """Remove the mirror component
2890 a8083063 Iustin Pop

2891 a8083063 Iustin Pop
    """
2892 a8083063 Iustin Pop
    instance = self.instance
2893 a8083063 Iustin Pop
    disk = self.disk
2894 a8083063 Iustin Pop
    child = self.child
2895 a8083063 Iustin Pop
    logger.Info("remove mirror component")
2896 a8083063 Iustin Pop
    self.cfg.SetDiskID(disk, instance.primary_node)
2897 a8083063 Iustin Pop
    if not rpc.call_blockdev_removechild(instance.primary_node,
2898 a8083063 Iustin Pop
                                              disk, child):
2899 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't remove child from mirror.")
2900 a8083063 Iustin Pop
2901 a8083063 Iustin Pop
    for node in child.logical_id[:2]:
2902 a8083063 Iustin Pop
      self.cfg.SetDiskID(child, node)
2903 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, child):
2904 a8083063 Iustin Pop
        logger.Error("Warning: failed to remove device from node %s,"
2905 a8083063 Iustin Pop
                     " continuing operation." % node)
2906 a8083063 Iustin Pop
2907 a8083063 Iustin Pop
    disk.children.remove(child)
2908 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
2909 a8083063 Iustin Pop
2910 a8083063 Iustin Pop
2911 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
2912 a8083063 Iustin Pop
  """Replace the disks of an instance.
2913 a8083063 Iustin Pop

2914 a8083063 Iustin Pop
  """
2915 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
2916 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2917 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2918 a8083063 Iustin Pop
2919 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2920 a8083063 Iustin Pop
    """Build hooks env.
2921 a8083063 Iustin Pop

2922 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
2923 a8083063 Iustin Pop

2924 a8083063 Iustin Pop
    """
2925 a8083063 Iustin Pop
    env = {
2926 a8083063 Iustin Pop
      "INSTANCE_NAME": self.op.instance_name,
2927 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
2928 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
2929 a8083063 Iustin Pop
      }
2930 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
2931 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
2932 a8083063 Iustin Pop
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is None:
      remote_node = instance.secondary_nodes[0]
    else:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    self.op.remote_node = remote_node
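
  # A minimal usage sketch (hypothetical names; assuming the opcode class
  # driving this LU is opcodes.OpReplaceDisks):
  #
  #   op = opcodes.OpReplaceDisks(instance_name="inst1.example.com",
  #                               remote_node="node3.example.com")
  #
  # If remote_node is omitted, the checks above fall back to the current
  # secondary, i.e. the mirror is rebuilt in place.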

  def Exec(self, feedback_fn):
    """Replace the disks of an instance.

    """
    instance = self.instance
    iv_names = {}
    # start of work
    remote_node = self.op.remote_node
    cfg = self.cfg
    vgname = cfg.GetVGName()
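    # For each disk the loop below generates a new DRBD mirror branch,
    # creates its block devices first on the (new) secondary and then on
    # the primary, and attaches it as an extra child of the MD device;
    # any failure rolls back the devices created so far.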
    for dev in instance.disks:
      size = dev.size
      new_drbd = _GenerateMDDRBDBranch(cfg, vgname, instance.primary_node,
                                       remote_node, size,
                                       "%s-%s" % (instance.name, dev.iv_name))
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False):
        raise errors.OpExecError("Failed to create new component on"
                                 " secondary node %s\n"
                                 "Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
                                        new_drbd):
        logger.Error("Can't add mirror component to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # field 5 of the call_blockdev_find result is the "degraded" flag
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechild(instance.primary_node,
                                           dev, child):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)


class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        self.wanted_instances.append(instance)
    else:
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return


  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type == "drbd":
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data
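
    # The result mirrors the disk tree recursively; e.g. for a DRBD device
    # backed by LVM children (hypothetical values):
    #
    #   {"iv_name": "sda", "dev_type": "drbd",
    #    "logical_id": ("node1.example.com", "node2.example.com", 11000),
    #    "physical_id": (...),
    #    "pstatus": <result of call_blockdev_find on the primary>,
    #    "sstatus": <result of call_blockdev_find on the secondary>,
    #    "children": [{"iv_name": "sda", "dev_type": "lvm", ...,
    #                  "children": []}, ...]}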

  def Exec(self, feedback_fn):
    """Gather and return data.

    """
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                           instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        }

      result[instance.name] = idict

    return result
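
    # A sample entry of the returned mapping (hypothetical values):
    #
    #   result["inst1.example.com"] = {
    #     "name": "inst1.example.com",
    #     "config_state": "up", "run_state": "up",
    #     "pnode": "node1.example.com", "snodes": ["node2.example.com"],
    #     "os": "debian-etch", "memory": 128,
    #     "nics": [("aa:00:00:fa:3a:3f", "192.0.2.10", "xen-br0")],
    #     "disks": [...]}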


class LUQueryNodeData(NoHooksLU):
  """Logical unit for querying node data.

  """
  _OP_REQP = ["nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional node list against the existing names.

    """
    self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute and return the list of nodes.

    """
    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]
    result = []
    for node in self.wanted_nodes:
      result.append((node.name, node.primary_ip, node.secondary_ip,
                     [inst.name for inst in ilist
                      if inst.primary_node == node.name],
                     [inst.name for inst in ilist
                      if node.name in inst.secondary_nodes],
                     ))
    return result
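
    # Each result entry is a tuple, e.g. (hypothetical values):
    #
    #   ("node1.example.com", "192.0.2.1", "198.51.100.1",
    #    ["inst1.example.com"],    # instances using this as primary node
    #    ["inst2.example.com"])    # instances using this as secondary node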


class LUSetInstanceParms(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      }
    if self.mem:
      env["MEM_SIZE"] = self.mem
    if self.vcpus:
      env["VCPUS"] = self.vcpus
    if self.do_ip:
      env["INSTANCE_IP"] = self.ip
    if self.bridge:
      env["BRIDGE"] = self.bridge

    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)

    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.bridge = getattr(self.op, "bridge", None)
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" %
                                 self.op.instance_name)
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    """
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus", self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))

    self.cfg.AddInstance(instance)

    return result
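
    # The returned list pairs each modified parameter with its new value,
    # e.g. [("mem", 256), ("bridge", "xen-br1")] for a call that changed
    # the memory size and the bridge (hypothetical values).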


class LUQueryExports(NoHooksLU):
  """Query the exports list.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list([node.name for node in self.nodes])
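
    # Sample return value (hypothetical names):
    #
    #   {"node1.example.com": ["inst1.example.com"],
    #    "node2.example.com": []}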


class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "INSTANCE_NAME": self.op.instance_name,
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
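    # Overview of the steps below: optionally shut the instance down,
    # snapshot its "sda" disk on the primary node (restarting the instance
    # afterwards if it was shut down), copy the snapshot to the target node
    # and finalize the export there, and finally prune older exports of this
    # instance from the remaining nodes.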
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.processor.ChainOpCode(op, feedback_fn)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
                                   logical_id=(vgname, new_dev_name),
                                   physical_id=(vgname, new_dev_name),
                                   iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance if it was shut down for the snapshot
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.processor.ChainOpCode(op, feedback_fn)

    # TODO: check for size

    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                      instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # On one-node clusters the nodelist will be empty after the removal;
    # if we proceeded, the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))