Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ a0c3fea1

History | View | Annotate | Download (112.1 kB)

1 a8083063 Iustin Pop
#!/usr/bin/python
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import socket
30 a8083063 Iustin Pop
import time
31 a8083063 Iustin Pop
import tempfile
32 a8083063 Iustin Pop
import re
33 a8083063 Iustin Pop
import platform
34 a8083063 Iustin Pop
35 a8083063 Iustin Pop
from ganeti import rpc
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 a8083063 Iustin Pop
from ganeti import config
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 a8083063 Iustin Pop
from ganeti import ssconf
46 a8083063 Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Raises errors.OpPrereqError if a required opcode parameter is
    missing, if the cluster is not initialised (and REQ_CLUSTER is
    set), or if we are not running on the master node (and REQ_MASTER
    is set).

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    # Validate that every parameter listed in _OP_REQP was supplied.
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        master = sstore.GetMasterNode()
        if master != socket.gethostname():
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in the
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
148 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for logical units that never trigger hooks.

  Deriving from this class instead of LogicalUnit spares subclasses
  from stubbing out the hook machinery themselves, reducing duplicate
  code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Return no hook environment.

    Hook execution is disabled for these LUs, so this is a no-op.

    """
    return
165 a8083063 Iustin Pop
166 a8083063 Iustin Pop
167 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
168 83120a01 Michael Hanselmann
  """Returns list of checked and expanded nodes.
169 83120a01 Michael Hanselmann

170 83120a01 Michael Hanselmann
  Args:
171 83120a01 Michael Hanselmann
    nodes: List of nodes (strings) or None for all
172 83120a01 Michael Hanselmann

173 83120a01 Michael Hanselmann
  """
174 dcb93971 Michael Hanselmann
  if nodes is not None and not isinstance(nodes, list):
175 dcb93971 Michael Hanselmann
    raise errors.OpPrereqError, "Invalid argument type 'nodes'"
176 dcb93971 Michael Hanselmann
177 dcb93971 Michael Hanselmann
  if nodes:
178 dcb93971 Michael Hanselmann
    wanted_nodes = []
179 dcb93971 Michael Hanselmann
180 dcb93971 Michael Hanselmann
    for name in nodes:
181 dcb93971 Michael Hanselmann
      node = lu.cfg.GetNodeInfo(lu.cfg.ExpandNodeName(name))
182 dcb93971 Michael Hanselmann
      if node is None:
183 dcb93971 Michael Hanselmann
        raise errors.OpPrereqError, ("No such node name '%s'" % name)
184 dcb93971 Michael Hanselmann
    wanted_nodes.append(node)
185 dcb93971 Michael Hanselmann
186 dcb93971 Michael Hanselmann
    return wanted_nodes
187 dcb93971 Michael Hanselmann
  else:
188 dcb93971 Michael Hanselmann
    return [lu.cfg.GetNodeInfo(name) for name in lu.cfg.GetNodeList()]
189 dcb93971 Michael Hanselmann
190 dcb93971 Michael Hanselmann
191 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
192 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
193 83120a01 Michael Hanselmann

194 83120a01 Michael Hanselmann
  Args:
195 83120a01 Michael Hanselmann
    static: Static fields
196 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
197 83120a01 Michael Hanselmann

198 83120a01 Michael Hanselmann
  """
199 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
200 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
201 dcb93971 Michael Hanselmann
202 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
203 dcb93971 Michael Hanselmann
204 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
205 83120a01 Michael Hanselmann
    raise errors.OpPrereqError, ("Unknown output fields selected: %s"
206 83120a01 Michael Hanselmann
                                 % ",".join(frozenset(selected).
207 83120a01 Michael Hanselmann
                                            difference(all_fields)))
208 dcb93971 Michael Hanselmann
209 dcb93971 Michael Hanselmann
210 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
211 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
212 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
213 ecb215b5 Michael Hanselmann

214 ecb215b5 Michael Hanselmann
  Args:
215 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
216 396e1b78 Michael Hanselmann
  """
217 396e1b78 Michael Hanselmann
  env = {
218 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
219 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
220 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
221 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
222 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
223 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
224 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
225 396e1b78 Michael Hanselmann
  }
226 396e1b78 Michael Hanselmann
227 396e1b78 Michael Hanselmann
  if nics:
228 396e1b78 Michael Hanselmann
    nic_count = len(nics)
229 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
230 396e1b78 Michael Hanselmann
      if ip is None:
231 396e1b78 Michael Hanselmann
        ip = ""
232 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
233 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
234 396e1b78 Michael Hanselmann
  else:
235 396e1b78 Michael Hanselmann
    nic_count = 0
236 396e1b78 Michael Hanselmann
237 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
238 396e1b78 Michael Hanselmann
239 396e1b78 Michael Hanselmann
  return env
240 396e1b78 Michael Hanselmann
241 396e1b78 Michael Hanselmann
242 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    dict of INSTANCE_* variables, as built by _BuildInstanceHookEnv

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this used to read instance.os (copy-paste of the line
    # above), exporting the OS name as INSTANCE_STATUS.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
262 396e1b78 Michael Hanselmann
263 396e1b78 Michael Hanselmann
264 a8083063 Iustin Pop
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Rewrites /etc/hosts so that one line associates 'ip' with both the
  fully qualified and the short name of the node, removing stale
  lines that mention only some of the three tokens.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  # Short host name: everything before the first dot.
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False

  save_lines = []   # lines kept unchanged
  add_lines = []    # lines to append if the entry is missing
  removed = False   # set when a stale partial entry is dropped

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    # Classify the line: does it contain all of (ip, fqdn, short
    # name), only some of them, or none?
    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      # Complete, correct entry already present - keep it.
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    # Unrelated line: keep as-is.
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    # NOTE(review): mkstemp creates the temp file with restrictive
    # permissions, so the replaced /etc/hosts may lose
    # world-readability - confirm.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
342 a8083063 Iustin Pop
343 a8083063 Iustin Pop
344 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Rewrites /etc/ssh/ssh_known_hosts so that one entry maps both
  'fullnode' and 'ip' to the cluster public key, removing stale
  entries that mention only some of the names or carry a wrong key.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  if os.path.exists('/etc/ssh/ssh_known_hosts'):
    f = open('/etc/ssh/ssh_known_hosts', 'r+')
  else:
    f = open('/etc/ssh/ssh_known_hosts', 'w+')

  inthere = False

  save_lines = []   # lines kept unchanged
  add_lines = []    # lines to append if the entry is missing
  removed = False   # set when a stale entry is dropped

  while True:
    rawline = f.readline()
    logger.Debug('read %s' % (repr(rawline),))

    if not rawline:
      # End of file
      break

    line = rawline.split('\n')[0]

    # known_hosts line format: "host1,host2 keytype key [comment]"
    parts = line.split(' ')
    if len(parts) < 3:
      # Bug fix: blank or comment lines (fewer than three fields)
      # used to raise IndexError on parts[2]; keep them untouched.
      save_lines.append(rawline)
      continue
    fields = parts[0].split(',')
    key = parts[2]

    haveall = True
    havesome = False
    for spec in [ ip, fullnode ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
    if haveall and key == pubkey:
      # Complete, correct entry already present - keep it.
      inthere = True
      save_lines.append(rawline)
      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
      continue

    if havesome and (not haveall or key != pubkey):
      # Partial or wrong-key entry for this node: drop it.
      removed = True
      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
      continue

    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('tmp', 'ssh_known_hosts_', '/etc/ssh')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, '/etc/ssh/ssh_known_hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
422 a8083063 Iustin Pop
423 a8083063 Iustin Pop
424 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
425 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
426 a8083063 Iustin Pop

427 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
428 a8083063 Iustin Pop
  is the error message.
429 a8083063 Iustin Pop

430 a8083063 Iustin Pop
  """
431 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
432 a8083063 Iustin Pop
  if vgsize is None:
433 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
434 a8083063 Iustin Pop
  elif vgsize < 20480:
435 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
436 191a8385 Guido Trotter
            (vgname, vgsize))
437 a8083063 Iustin Pop
  return None
438 a8083063 Iustin Pop
439 a8083063 Iustin Pop
440 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn
      (NOTE(review): currently unused in the body - confirm intent)

  Raises:
    errors.OpExecError: if ssh-keygen fails

  """
  # Drop the old known_hosts; it will be rebuilt as needed.
  utils.RemoveFile('/root/.ssh/known_hosts')

  # Preserve any pre-existing keypair before replacing it.
  if os.path.exists('/root/.ssh/id_dsa'):
    utils.CreateBackup('/root/.ssh/id_dsa')
  if os.path.exists('/root/.ssh/id_dsa.pub'):
    utils.CreateBackup('/root/.ssh/id_dsa.pub')

  utils.RemoveFile('/root/.ssh/id_dsa')
  utils.RemoveFile('/root/.ssh/id_dsa.pub')

  # Generate a fresh passphrase-less dsa keypair.
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", "/root/.ssh/id_dsa",
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("could not generate ssh keypair, error %s" %
                             result.output)

  # Authorize the new public key for root logins on this host.
  f = open('/root/.ssh/id_dsa.pub', 'r')
  try:
    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
  finally:
    f.close()
473 a8083063 Iustin Pop
474 a8083063 Iustin Pop
475 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the ssconf.SimpleStore instance the node password is written to

  Raises:
    errors.OpExecError: if certificate generation or the node daemon
      restart fails

  """
  # Create pseudo random password
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Self-signed cert valid for five years; key and certificate are
  # written into the same file.
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # Owner read-only; 0o400 is the octal spelling valid on both
  # Python 2.6+ and Python 3 (the old 0400 form is Python 2 only).
  os.chmod(constants.SSL_CERT_FILE, 0o400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
504 a8083063 Iustin Pop
505 a8083063 Iustin Pop
506 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {
      "CLUSTER": self.op.cluster_name,
      "MASTER": self.hostname['hostname_full'],
      }
    return env, [], [self.hostname['hostname_full']]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Checks that the cluster is not yet initialised, that our own and
    the cluster's names resolve, that the given IPs belong to this
    host, and that the volume group, mac prefix, hypervisor type and
    master netdev parameters are valid.  Raises errors.OpPrereqError
    otherwise.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    hostname_local = socket.gethostname()
    self.hostname = hostname = utils.LookupHostname(hostname_local)
    if not hostname:
      raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
                                 hostname_local)

    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
    if not clustername:
      raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
                                 % self.op.cluster_name)

    # Verify that the IP our name resolves to is configured locally.
    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname['ip']])
    if result.failed:
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname['ip'])

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if secondary_ip and secondary_ip != hostname['ip']:
      # A distinct secondary IP must also be configured on this host.
      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
      if result.failed:
        raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                   "but it does not belong to this host." %
                                   secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev, result.output))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Sets up the simple store, the inter-node password and certificate,
    the master IP, ssh keys / known_hosts / /etc/hosts, and finally
    writes the initial cluster configuration.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname['hostname_full'])
    ss.SetKey(ss.SS_MASTER_IP, clustername['ip'])
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername['hostname'])

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname['hostname_full'])

    # set up ssh config and /etc/hosts
    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # The key proper is the second whitespace-separated field.
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname['hostname_full'],
                    hostname['ip'],
                    )

    _UpdateKnownHosts(hostname['hostname_full'],
                      hostname['ip'],
                      sshkey,
                      )

    _InitSSHSetup(hostname['hostname'])

    # init of cluster config file
    cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname['hostname'], hostname['ip'], self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
630 a8083063 Iustin Pop
631 a8083063 Iustin Pop
632 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty, i.e. that the only node
    left is the master and that no instances are defined.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master = self.sstore.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      # use the call form of raise; the "raise Class, args" form is
      # deprecated and not valid in python 3
      raise errors.OpPrereqError("There are still %d node(s) in "
                                 "this cluster." % (len(nodelist) - 1))
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in "
                                 "this cluster." % len(instancelist))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Backs up the root ssh keypair (so a new cluster can be created
    later) and tells the master node to leave the cluster.

    """
    utils.CreateBackup('/root/.ssh/id_dsa')
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
664 a8083063 Iustin Pop
665 a8083063 Iustin Pop
666 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  All _Verify* helpers follow the same convention: they return True if
  any problem was found ("bad") and False otherwise, so Exec can
  accumulate results with 'bad = bad or result'.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data returned by the node
      node_result: results of the node-verify rpc call
      remote_version: protocol version reported by the node
      feedback_fn: function used to report problems

    Returns:
      True if any check failed, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # use a dedicated loop variable; the original shadowed the
        # 'node' parameter here
        for remote_node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (remote_node, node_result['nodelist'][remote_node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
      # a reported hypervisor failure must count towards the node's
      # badness (the original reported it but left 'bad' untouched)
      bad = True
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, and that the instance runs
    exactly on its primary node.

    Returns:
      True if any check failed, False otherwise (same convention as
      the other _Verify* helpers).

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    # the original returned 'not bad', inverting the meaning compared
    # to the other _Verify* helpers: Exec's 'bad = bad or result' then
    # flagged every *healthy* instance and ignored broken ones
    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns 0 if the cluster is healthy, 1 otherwise (suitable as a
    shell exit code).

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.sstore.GetMasterNode()
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
908 a8083063 Iustin Pop
909 a8083063 Iustin Pop
910 a8083063 Iustin Pop
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
911 a8083063 Iustin Pop
  """Sleep and poll for an instance's disk to sync.
912 a8083063 Iustin Pop

913 a8083063 Iustin Pop
  """
914 a8083063 Iustin Pop
  if not instance.disks:
915 a8083063 Iustin Pop
    return True
916 a8083063 Iustin Pop
917 a8083063 Iustin Pop
  if not oneshot:
918 a8083063 Iustin Pop
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)
919 a8083063 Iustin Pop
920 a8083063 Iustin Pop
  node = instance.primary_node
921 a8083063 Iustin Pop
922 a8083063 Iustin Pop
  for dev in instance.disks:
923 a8083063 Iustin Pop
    cfgw.SetDiskID(dev, node)
924 a8083063 Iustin Pop
925 a8083063 Iustin Pop
  retries = 0
926 a8083063 Iustin Pop
  while True:
927 a8083063 Iustin Pop
    max_time = 0
928 a8083063 Iustin Pop
    done = True
929 a8083063 Iustin Pop
    cumul_degraded = False
930 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
931 a8083063 Iustin Pop
    if not rstats:
932 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
933 a8083063 Iustin Pop
      retries += 1
934 a8083063 Iustin Pop
      if retries >= 10:
935 a8083063 Iustin Pop
        raise errors.RemoteError, ("Can't contact node %s for mirror data,"
936 a8083063 Iustin Pop
                                   " aborting." % node)
937 a8083063 Iustin Pop
      time.sleep(6)
938 a8083063 Iustin Pop
      continue
939 a8083063 Iustin Pop
    retries = 0
940 a8083063 Iustin Pop
    for i in range(len(rstats)):
941 a8083063 Iustin Pop
      mstat = rstats[i]
942 a8083063 Iustin Pop
      if mstat is None:
943 a8083063 Iustin Pop
        logger.ToStderr("Can't compute data for node %s/%s" %
944 a8083063 Iustin Pop
                        (node, instance.disks[i].iv_name))
945 a8083063 Iustin Pop
        continue
946 a8083063 Iustin Pop
      perc_done, est_time, is_degraded = mstat
947 a8083063 Iustin Pop
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
948 a8083063 Iustin Pop
      if perc_done is not None:
949 a8083063 Iustin Pop
        done = False
950 a8083063 Iustin Pop
        if est_time is not None:
951 a8083063 Iustin Pop
          rem_time = "%d estimated seconds remaining" % est_time
952 a8083063 Iustin Pop
          max_time = est_time
953 a8083063 Iustin Pop
        else:
954 a8083063 Iustin Pop
          rem_time = "no time estimate"
955 a8083063 Iustin Pop
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
956 a8083063 Iustin Pop
                        (instance.disks[i].iv_name, perc_done, rem_time))
957 a8083063 Iustin Pop
    if done or oneshot:
958 a8083063 Iustin Pop
      break
959 a8083063 Iustin Pop
960 a8083063 Iustin Pop
    if unlock:
961 a8083063 Iustin Pop
      utils.Unlock('cmd')
962 a8083063 Iustin Pop
    try:
963 a8083063 Iustin Pop
      time.sleep(min(60, max_time))
964 a8083063 Iustin Pop
    finally:
965 a8083063 Iustin Pop
      if unlock:
966 a8083063 Iustin Pop
        utils.Lock('cmd')
967 a8083063 Iustin Pop
968 a8083063 Iustin Pop
  if done:
969 a8083063 Iustin Pop
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
970 a8083063 Iustin Pop
  return not cumul_degraded
971 a8083063 Iustin Pop
972 a8083063 Iustin Pop
973 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
974 a8083063 Iustin Pop
  """Check that mirrors are not degraded.
975 a8083063 Iustin Pop

976 a8083063 Iustin Pop
  """
977 a8083063 Iustin Pop
  cfgw.SetDiskID(dev, node)
978 a8083063 Iustin Pop
979 a8083063 Iustin Pop
  result = True
980 a8083063 Iustin Pop
  if on_primary or dev.AssembleOnSecondary():
981 a8083063 Iustin Pop
    rstats = rpc.call_blockdev_find(node, dev)
982 a8083063 Iustin Pop
    if not rstats:
983 a8083063 Iustin Pop
      logger.ToStderr("Can't get any data from node %s" % node)
984 a8083063 Iustin Pop
      result = False
985 a8083063 Iustin Pop
    else:
986 a8083063 Iustin Pop
      result = result and (not rstats[5])
987 a8083063 Iustin Pop
  if dev.children:
988 a8083063 Iustin Pop
    for child in dev.children:
989 a8083063 Iustin Pop
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)
990 a8083063 Iustin Pop
991 a8083063 Iustin Pop
  return result
992 a8083063 Iustin Pop
993 a8083063 Iustin Pop
994 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Returns the per-node OS diagnose data gathered via rpc.

    Raises:
      errors.OpExecError: if the rpc call signalled failure

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    # the rpc layer signals failure with the False singleton; test for
    # identity so valid-but-falsy results are not mistaken for errors
    if node_data is False:
      # use the call form of raise; the "raise Class, args" form is
      # deprecated and not valid in python 3
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data
1017 a8083063 Iustin Pop
1018 a8083063 Iustin Pop
1019 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # raise as the docstring promises: the original logged an error
      # and returned 1, a value the caller silently discards
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      # use the call form of raise; the "raise Class, args" form is
      # deprecated and not valid in python 3
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Tells the node to leave the cluster, stops its node daemon and
    finally drops it from the configuration.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)
1090 a8083063 Iustin Pop
1091 a8083063 Iustin Pop
1092 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields whose values must be fetched live from the nodes
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree"])

    _CheckOutputFields(static=["name", "pinst", "sinst", "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per node, each row a list of string
    values in the order of self.op.output_fields.

    """
    nodenames = utils.NiceSort(self.cfg.GetNodeList())
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]


    # begin data gathering

    # only contact the nodes if at least one live field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict.fromkeys(nodenames, 0)
    node_to_secondary = dict.fromkeys(nodenames, 0)

    # instance counts are only computed when actually requested
    if "pinst" in self.op.output_fields or "sinst" in self.op.output_fields:
      instancelist = self.cfg.GetInstanceList()

      for instance in instancelist:
        instanceinfo = self.cfg.GetInstanceInfo(instance)
        node_to_primary[instanceinfo.primary_node] += 1
        for secnode in instanceinfo.secondary_nodes:
          node_to_secondary[secnode] += 1

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst":
          val = node_to_primary[node.name]
        elif field == "sinst":
          val = node_to_secondary[node.name]
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, "?")
        else:
          # use the call form of raise; the "raise Class, args" form is
          # deprecated and not valid in python 3
          raise errors.ParameterError(field)
        val = str(val)
        node_output.append(val)
      output.append(node_output)

    return output
1177 a8083063 Iustin Pop
1178 a8083063 Iustin Pop
1179 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per logical volume, each row a list of
    string values in the order of self.op.output_fields.

    """
    nodenames = utils.NiceSort([node.name for node in self.nodes])
    volumes = rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    # map each instance to its per-node LV layout, used to attribute
    # volumes to instances below
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      # sort a copy so the rpc result is left untouched
      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find which instance (if any) owns this LV on this node
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            # use the call form of raise; the "raise Class, args" form
            # is deprecated and not valid in python 3
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output
1244 dcb93971 Michael Hanselmann
1245 dcb93971 Michael Hanselmann
1246 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
1247 a8083063 Iustin Pop
  """Logical unit for adding node to the cluster.
1248 a8083063 Iustin Pop

1249 a8083063 Iustin Pop
  """
1250 a8083063 Iustin Pop
  HPATH = "node-add"
1251 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_NODE
1252 a8083063 Iustin Pop
  _OP_REQP = ["node_name"]
1253 a8083063 Iustin Pop
1254 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1255 a8083063 Iustin Pop
    """Build hooks env.
1256 a8083063 Iustin Pop

1257 a8083063 Iustin Pop
    This will run on all nodes before, and on all nodes + the new node after.
1258 a8083063 Iustin Pop

1259 a8083063 Iustin Pop
    """
1260 a8083063 Iustin Pop
    env = {
1261 a8083063 Iustin Pop
      "NODE_NAME": self.op.node_name,
1262 a8083063 Iustin Pop
      "NODE_PIP": self.op.primary_ip,
1263 a8083063 Iustin Pop
      "NODE_SIP": self.op.secondary_ip,
1264 a8083063 Iustin Pop
      }
1265 a8083063 Iustin Pop
    nodes_0 = self.cfg.GetNodeList()
1266 a8083063 Iustin Pop
    nodes_1 = nodes_0 + [self.op.node_name, ]
1267 a8083063 Iustin Pop
    return env, nodes_0, nodes_1
1268 a8083063 Iustin Pop
1269 a8083063 Iustin Pop
  def CheckPrereq(self):
1270 a8083063 Iustin Pop
    """Check prerequisites.
1271 a8083063 Iustin Pop

1272 a8083063 Iustin Pop
    This checks:
1273 a8083063 Iustin Pop
     - the new node is not already in the config
1274 a8083063 Iustin Pop
     - it is resolvable
1275 a8083063 Iustin Pop
     - its parameters (single/dual homed) matches the cluster
1276 a8083063 Iustin Pop

1277 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
1278 a8083063 Iustin Pop

1279 a8083063 Iustin Pop
    """
1280 a8083063 Iustin Pop
    node_name = self.op.node_name
1281 a8083063 Iustin Pop
    cfg = self.cfg
1282 a8083063 Iustin Pop
1283 a8083063 Iustin Pop
    dns_data = utils.LookupHostname(node_name)
1284 a8083063 Iustin Pop
    if not dns_data:
1285 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Node %s is not resolvable" % node_name)
1286 a8083063 Iustin Pop
1287 a8083063 Iustin Pop
    node = dns_data['hostname']
1288 a8083063 Iustin Pop
    primary_ip = self.op.primary_ip = dns_data['ip']
1289 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
1290 a8083063 Iustin Pop
    if secondary_ip is None:
1291 a8083063 Iustin Pop
      secondary_ip = primary_ip
1292 a8083063 Iustin Pop
    if not utils.IsValidIP(secondary_ip):
1293 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Invalid secondary IP given")
1294 a8083063 Iustin Pop
    self.op.secondary_ip = secondary_ip
1295 a8083063 Iustin Pop
    node_list = cfg.GetNodeList()
1296 a8083063 Iustin Pop
    if node in node_list:
1297 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Node %s is already in the configuration"
1298 a8083063 Iustin Pop
                                   % node)
1299 a8083063 Iustin Pop
1300 a8083063 Iustin Pop
    for existing_node_name in node_list:
1301 a8083063 Iustin Pop
      existing_node = cfg.GetNodeInfo(existing_node_name)
1302 a8083063 Iustin Pop
      if (existing_node.primary_ip == primary_ip or
1303 a8083063 Iustin Pop
          existing_node.secondary_ip == primary_ip or
1304 a8083063 Iustin Pop
          existing_node.primary_ip == secondary_ip or
1305 a8083063 Iustin Pop
          existing_node.secondary_ip == secondary_ip):
1306 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("New node ip address(es) conflict with"
1307 a8083063 Iustin Pop
                                     " existing node %s" % existing_node.name)
1308 a8083063 Iustin Pop
1309 a8083063 Iustin Pop
    # check that the type of the node (single versus dual homed) is the
1310 a8083063 Iustin Pop
    # same as for the master
1311 880478f8 Iustin Pop
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
1312 a8083063 Iustin Pop
    master_singlehomed = myself.secondary_ip == myself.primary_ip
1313 a8083063 Iustin Pop
    newbie_singlehomed = secondary_ip == primary_ip
1314 a8083063 Iustin Pop
    if master_singlehomed != newbie_singlehomed:
1315 a8083063 Iustin Pop
      if master_singlehomed:
1316 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("The master has no private ip but the"
1317 a8083063 Iustin Pop
                                     " new node has one")
1318 a8083063 Iustin Pop
      else:
1319 a8083063 Iustin Pop
        raise errors.OpPrereqError ("The master has a private ip but the"
1320 a8083063 Iustin Pop
                                    " new node doesn't have one")
1321 a8083063 Iustin Pop
1322 a8083063 Iustin Pop
    # checks reachablity
1323 a8083063 Iustin Pop
    command = ["fping", "-q", primary_ip]
1324 a8083063 Iustin Pop
    result = utils.RunCmd(command)
1325 a8083063 Iustin Pop
    if result.failed:
1326 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Node not reachable by ping")
1327 a8083063 Iustin Pop
1328 a8083063 Iustin Pop
    if not newbie_singlehomed:
1329 a8083063 Iustin Pop
      # check reachability from my secondary ip to newbie's secondary ip
1330 a8083063 Iustin Pop
      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
1331 a8083063 Iustin Pop
      result = utils.RunCmd(command)
1332 a8083063 Iustin Pop
      if result.failed:
1333 a8083063 Iustin Pop
        raise errors.OpPrereqError, ("Node secondary ip not reachable by ping")
1334 a8083063 Iustin Pop
1335 a8083063 Iustin Pop
    self.new_node = objects.Node(name=node,
1336 a8083063 Iustin Pop
                                 primary_ip=primary_ip,
1337 a8083063 Iustin Pop
                                 secondary_ip=secondary_ip)
1338 a8083063 Iustin Pop
1339 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1340 a8083063 Iustin Pop
    """Adds the new node to the cluster.
1341 a8083063 Iustin Pop

1342 a8083063 Iustin Pop
    """
1343 a8083063 Iustin Pop
    new_node = self.new_node
1344 a8083063 Iustin Pop
    node = new_node.name
1345 a8083063 Iustin Pop
1346 a8083063 Iustin Pop
    # set up inter-node password and certificate and restarts the node daemon
1347 a8083063 Iustin Pop
    gntpass = self.sstore.GetNodeDaemonPassword()
1348 a8083063 Iustin Pop
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
1349 a8083063 Iustin Pop
      raise errors.OpExecError, ("ganeti password corruption detected")
1350 a8083063 Iustin Pop
    f = open(constants.SSL_CERT_FILE)
1351 a8083063 Iustin Pop
    try:
1352 a8083063 Iustin Pop
      gntpem = f.read(8192)
1353 a8083063 Iustin Pop
    finally:
1354 a8083063 Iustin Pop
      f.close()
1355 a8083063 Iustin Pop
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
1356 a8083063 Iustin Pop
    # so we use this to detect an invalid certificate; as long as the
1357 a8083063 Iustin Pop
    # cert doesn't contain this, the here-document will be correctly
1358 a8083063 Iustin Pop
    # parsed by the shell sequence below
1359 a8083063 Iustin Pop
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
1360 a8083063 Iustin Pop
      raise errors.OpExecError, ("invalid PEM encoding in the SSL certificate")
1361 a8083063 Iustin Pop
    if not gntpem.endswith("\n"):
1362 a8083063 Iustin Pop
      raise errors.OpExecError, ("PEM must end with newline")
1363 a8083063 Iustin Pop
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)
1364 a8083063 Iustin Pop
1365 a8083063 Iustin Pop
    # remove first the root's known_hosts file
1366 a8083063 Iustin Pop
    utils.RemoveFile("/root/.ssh/known_hosts")
1367 a8083063 Iustin Pop
    # and then connect with ssh to set password and start ganeti-noded
1368 a8083063 Iustin Pop
    # note that all the below variables are sanitized at this point,
1369 a8083063 Iustin Pop
    # either by being constants or by the checks above
1370 a8083063 Iustin Pop
    ss = self.sstore
1371 a8083063 Iustin Pop
    mycommand = ("umask 077 && "
1372 a8083063 Iustin Pop
                 "echo '%s' > '%s' && "
1373 a8083063 Iustin Pop
                 "cat > '%s' << '!EOF.' && \n"
1374 a8083063 Iustin Pop
                 "%s!EOF.\n%s restart" %
1375 a8083063 Iustin Pop
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
1376 a8083063 Iustin Pop
                  constants.SSL_CERT_FILE, gntpem,
1377 a8083063 Iustin Pop
                  constants.NODE_INITD_SCRIPT))
1378 a8083063 Iustin Pop
1379 a8083063 Iustin Pop
    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
1380 a8083063 Iustin Pop
    if result.failed:
1381 a8083063 Iustin Pop
      raise errors.OpExecError, ("Remote command on node %s, error: %s,"
1382 a8083063 Iustin Pop
                                 " output: %s" %
1383 a8083063 Iustin Pop
                                 (node, result.fail_reason, result.output))
1384 a8083063 Iustin Pop
1385 a8083063 Iustin Pop
    # check connectivity
1386 a8083063 Iustin Pop
    time.sleep(4)
1387 a8083063 Iustin Pop
1388 a8083063 Iustin Pop
    result = rpc.call_version([node])[node]
1389 a8083063 Iustin Pop
    if result:
1390 a8083063 Iustin Pop
      if constants.PROTOCOL_VERSION == result:
1391 a8083063 Iustin Pop
        logger.Info("communication to node %s fine, sw version %s match" %
1392 a8083063 Iustin Pop
                    (node, result))
1393 a8083063 Iustin Pop
      else:
1394 a8083063 Iustin Pop
        raise errors.OpExecError, ("Version mismatch master version %s,"
1395 a8083063 Iustin Pop
                                   " node version %s" %
1396 a8083063 Iustin Pop
                                   (constants.PROTOCOL_VERSION, result))
1397 a8083063 Iustin Pop
    else:
1398 a8083063 Iustin Pop
      raise errors.OpExecError, ("Cannot get version from the new node")
1399 a8083063 Iustin Pop
1400 a8083063 Iustin Pop
    # setup ssh on node
1401 a8083063 Iustin Pop
    logger.Info("copy ssh key to node %s" % node)
1402 a8083063 Iustin Pop
    keyarray = []
1403 a8083063 Iustin Pop
    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
1404 a8083063 Iustin Pop
                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
1405 a8083063 Iustin Pop
                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]
1406 a8083063 Iustin Pop
1407 a8083063 Iustin Pop
    for i in keyfiles:
1408 a8083063 Iustin Pop
      f = open(i, 'r')
1409 a8083063 Iustin Pop
      try:
1410 a8083063 Iustin Pop
        keyarray.append(f.read())
1411 a8083063 Iustin Pop
      finally:
1412 a8083063 Iustin Pop
        f.close()
1413 a8083063 Iustin Pop
1414 a8083063 Iustin Pop
    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
1415 a8083063 Iustin Pop
                               keyarray[3], keyarray[4], keyarray[5])
1416 a8083063 Iustin Pop
1417 a8083063 Iustin Pop
    if not result:
1418 a8083063 Iustin Pop
      raise errors.OpExecError, ("Cannot transfer ssh keys to the new node")
1419 a8083063 Iustin Pop
1420 a8083063 Iustin Pop
    # Add node to our /etc/hosts, and add key to known_hosts
1421 a8083063 Iustin Pop
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
1422 a8083063 Iustin Pop
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
1423 a8083063 Iustin Pop
                      self.cfg.GetHostKey())
1424 a8083063 Iustin Pop
1425 a8083063 Iustin Pop
    if new_node.secondary_ip != new_node.primary_ip:
1426 a8083063 Iustin Pop
      result = ssh.SSHCall(node, "root",
1427 a8083063 Iustin Pop
                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
1428 a8083063 Iustin Pop
      if result.failed:
1429 a8083063 Iustin Pop
        raise errors.OpExecError, ("Node claims it doesn't have the"
1430 a8083063 Iustin Pop
                                   " secondary ip you gave (%s).\n"
1431 a8083063 Iustin Pop
                                   "Please fix and re-run this command." %
1432 a8083063 Iustin Pop
                                   new_node.secondary_ip)
1433 a8083063 Iustin Pop
1434 a8083063 Iustin Pop
    # Distribute updated /etc/hosts and known_hosts to all nodes,
1435 a8083063 Iustin Pop
    # including the node just added
1436 880478f8 Iustin Pop
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
1437 a8083063 Iustin Pop
    dist_nodes = self.cfg.GetNodeList() + [node]
1438 a8083063 Iustin Pop
    if myself.name in dist_nodes:
1439 a8083063 Iustin Pop
      dist_nodes.remove(myself.name)
1440 a8083063 Iustin Pop
1441 a8083063 Iustin Pop
    logger.Debug("Copying hosts and known_hosts to all nodes")
1442 a8083063 Iustin Pop
    for fname in ("/etc/hosts", "/etc/ssh/ssh_known_hosts"):
1443 a8083063 Iustin Pop
      result = rpc.call_upload_file(dist_nodes, fname)
1444 a8083063 Iustin Pop
      for to_node in dist_nodes:
1445 a8083063 Iustin Pop
        if not result[to_node]:
1446 a8083063 Iustin Pop
          logger.Error("copy of file %s to node %s failed" %
1447 a8083063 Iustin Pop
                       (fname, to_node))
1448 a8083063 Iustin Pop
1449 cb91d46e Iustin Pop
    to_copy = ss.GetFileList()
1450 a8083063 Iustin Pop
    for fname in to_copy:
1451 a8083063 Iustin Pop
      if not ssh.CopyFileToNode(node, fname):
1452 a8083063 Iustin Pop
        logger.Error("could not copy file %s to node %s" % (fname, node))
1453 a8083063 Iustin Pop
1454 a8083063 Iustin Pop
    logger.Info("adding node %s to cluster.conf" % node)
1455 a8083063 Iustin Pop
    self.cfg.AddNode(new_node)
1456 a8083063 Iustin Pop
1457 a8083063 Iustin Pop
1458 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
1459 a8083063 Iustin Pop
  """Failover the master node to the current node.
1460 a8083063 Iustin Pop

1461 a8083063 Iustin Pop
  This is a special LU in that it must run on a non-master node.
1462 a8083063 Iustin Pop

1463 a8083063 Iustin Pop
  """
1464 a8083063 Iustin Pop
  HPATH = "master-failover"
1465 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
1466 a8083063 Iustin Pop
  REQ_MASTER = False
1467 a8083063 Iustin Pop
  _OP_REQP = []
1468 a8083063 Iustin Pop
1469 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1470 a8083063 Iustin Pop
    """Build hooks env.
1471 a8083063 Iustin Pop

1472 a8083063 Iustin Pop
    This will run on the new master only in the pre phase, and on all
1473 a8083063 Iustin Pop
    the nodes in the post phase.
1474 a8083063 Iustin Pop

1475 a8083063 Iustin Pop
    """
1476 a8083063 Iustin Pop
    env = {
1477 a8083063 Iustin Pop
      "NEW_MASTER": self.new_master,
1478 a8083063 Iustin Pop
      "OLD_MASTER": self.old_master,
1479 a8083063 Iustin Pop
      }
1480 a8083063 Iustin Pop
    return env, [self.new_master], self.cfg.GetNodeList()
1481 a8083063 Iustin Pop
1482 a8083063 Iustin Pop
  def CheckPrereq(self):
1483 a8083063 Iustin Pop
    """Check prerequisites.
1484 a8083063 Iustin Pop

1485 a8083063 Iustin Pop
    This checks that we are not already the master.
1486 a8083063 Iustin Pop

1487 a8083063 Iustin Pop
    """
1488 a8083063 Iustin Pop
    self.new_master = socket.gethostname()
1489 a8083063 Iustin Pop
1490 880478f8 Iustin Pop
    self.old_master = self.sstore.GetMasterNode()
1491 a8083063 Iustin Pop
1492 a8083063 Iustin Pop
    if self.old_master == self.new_master:
1493 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("This commands must be run on the node"
1494 a8083063 Iustin Pop
                                   " where you want the new master to be.\n"
1495 a8083063 Iustin Pop
                                   "%s is already the master" %
1496 a8083063 Iustin Pop
                                   self.old_master)
1497 a8083063 Iustin Pop
1498 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1499 a8083063 Iustin Pop
    """Failover the master node.
1500 a8083063 Iustin Pop

1501 a8083063 Iustin Pop
    This command, when run on a non-master node, will cause the current
1502 a8083063 Iustin Pop
    master to cease being master, and the non-master to become new
1503 a8083063 Iustin Pop
    master.
1504 a8083063 Iustin Pop

1505 a8083063 Iustin Pop
    """
1506 a8083063 Iustin Pop
    #TODO: do not rely on gethostname returning the FQDN
1507 a8083063 Iustin Pop
    logger.Info("setting master to %s, old master: %s" %
1508 a8083063 Iustin Pop
                (self.new_master, self.old_master))
1509 a8083063 Iustin Pop
1510 a8083063 Iustin Pop
    if not rpc.call_node_stop_master(self.old_master):
1511 a8083063 Iustin Pop
      logger.Error("could disable the master role on the old master"
1512 a8083063 Iustin Pop
                   " %s, please disable manually" % self.old_master)
1513 a8083063 Iustin Pop
1514 880478f8 Iustin Pop
    ss = self.sstore
1515 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
1516 880478f8 Iustin Pop
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
1517 880478f8 Iustin Pop
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
1518 880478f8 Iustin Pop
      logger.Error("could not distribute the new simple store master file"
1519 880478f8 Iustin Pop
                   " to the other nodes, please check.")
1520 880478f8 Iustin Pop
1521 a8083063 Iustin Pop
    if not rpc.call_node_start_master(self.new_master):
1522 a8083063 Iustin Pop
      logger.Error("could not start the master role on the new master"
1523 a8083063 Iustin Pop
                   " %s, please check" % self.new_master)
1524 880478f8 Iustin Pop
      feedback_fn("Error in activating the master IP on the new master,\n"
1525 880478f8 Iustin Pop
                  "please fix manually.")
1526 a8083063 Iustin Pop
1527 a8083063 Iustin Pop
1528 a8083063 Iustin Pop
1529 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  # this is read-only information, so it may run even without a master
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    sstore = self.sstore
    arch_bits = platform.architecture()[0]
    machine = platform.machine()
    return {
      "name": sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": sstore.GetMasterNode(),
      "architecture": (arch_bits, machine),
      }
1558 a8083063 Iustin Pop
1559 a8083063 Iustin Pop
1560 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Args:
      opts - class with options as members
      args - list containing a single element, the file name
    Opts used:
      nodes - list containing the name of target nodes; if empty, all nodes

    """
    fname = self.op.filename
    local_name = socket.gethostname()

    for target in self.nodes:
      # never copy the file onto the node we are running on
      if target == local_name:
        continue
      if not ssh.CopyFileToNode(target, fname):
        logger.Error("Copy of file %s to node %s failed" % (fname, target))
1597 a8083063 Iustin Pop
1598 a8083063 Iustin Pop
1599 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    # the configuration object knows how to serialize itself
    cfg = self.cfg
    return cfg.DumpConfig()
1616 a8083063 Iustin Pop
1617 a8083063 Iustin Pop
1618 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    """
    command = self.op.command
    output = []
    for node in self.nodes:
      # run the command remotely via ssh and collect the full result
      res = utils.RunCmd(["ssh", node.name, command])
      output.append((node.name, res.cmd, res.output, res.exit_code))

    return output
1642 a8083063 Iustin Pop
1643 a8083063 Iustin Pop
1644 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
1645 a8083063 Iustin Pop
  """Bring up an instance's disks.
1646 a8083063 Iustin Pop

1647 a8083063 Iustin Pop
  """
1648 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1649 a8083063 Iustin Pop
1650 a8083063 Iustin Pop
  def CheckPrereq(self):
1651 a8083063 Iustin Pop
    """Check prerequisites.
1652 a8083063 Iustin Pop

1653 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1654 a8083063 Iustin Pop

1655 a8083063 Iustin Pop
    """
1656 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1657 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1658 a8083063 Iustin Pop
    if instance is None:
1659 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
1660 a8083063 Iustin Pop
                                   self.op.instance_name)
1661 a8083063 Iustin Pop
    self.instance = instance
1662 a8083063 Iustin Pop
1663 a8083063 Iustin Pop
1664 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1665 a8083063 Iustin Pop
    """Activate the disks.
1666 a8083063 Iustin Pop

1667 a8083063 Iustin Pop
    """
1668 a8083063 Iustin Pop
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
1669 a8083063 Iustin Pop
    if not disks_ok:
1670 a8083063 Iustin Pop
      raise errors.OpExecError, ("Cannot activate block devices")
1671 a8083063 Iustin Pop
1672 a8083063 Iustin Pop
    return disks_info
1673 a8083063 Iustin Pop
1674 a8083063 Iustin Pop
1675 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the configuration object, used to resolve the disk IDs
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    false if the operation failed
    list of (host, instance_visible_name, node_visible_name) if the operation
         suceeded with the mapping from node devices to instance devices
  """
  primary = instance.primary_node
  device_info = []
  disks_ok = True
  for disk in instance.disks:
    primary_result = None
    for node, node_disk in disk.ComputeNodeTree(primary):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == primary
      assembled = rpc.call_blockdev_assemble(node, node_disk, is_primary)
      if not assembled:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=%s)" % (disk.iv_name, node, is_primary))
        # a primary failure is always fatal; a secondary one only when
        # the caller did not ask us to ignore secondaries
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        primary_result = assembled
    device_info.append((primary, disk.iv_name, primary_result))

  return disks_ok, device_info
1709 a8083063 Iustin Pop
1710 a8083063 Iustin Pop
1711 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
1712 fe7b0351 Michael Hanselmann
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
1713 fe7b0351 Michael Hanselmann
                                           ignore_secondaries=force)
1714 fe7b0351 Michael Hanselmann
  if not disks_ok:
1715 fe7b0351 Michael Hanselmann
    _ShutdownInstanceDisks(instance, cfg)
1716 fe7b0351 Michael Hanselmann
    if force is not None and not force:
1717 fe7b0351 Michael Hanselmann
      logger.Error("If the message above refers to a secondary node,"
1718 fe7b0351 Michael Hanselmann
                   " you can retry the operation using '--force'.")
1719 fe7b0351 Michael Hanselmann
    raise errors.OpExecError, ("Disk consistency error")
1720 fe7b0351 Michael Hanselmann
1721 fe7b0351 Michael Hanselmann
1722 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
1723 a8083063 Iustin Pop
  """Shutdown an instance's disks.
1724 a8083063 Iustin Pop

1725 a8083063 Iustin Pop
  """
1726 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1727 a8083063 Iustin Pop
1728 a8083063 Iustin Pop
  def CheckPrereq(self):
1729 a8083063 Iustin Pop
    """Check prerequisites.
1730 a8083063 Iustin Pop

1731 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1732 a8083063 Iustin Pop

1733 a8083063 Iustin Pop
    """
1734 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1735 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1736 a8083063 Iustin Pop
    if instance is None:
1737 a8083063 Iustin Pop
      raise errors.OpPrereqError, ("Instance '%s' not known" %
1738 a8083063 Iustin Pop
                                   self.op.instance_name)
1739 a8083063 Iustin Pop
    self.instance = instance
1740 a8083063 Iustin Pop
1741 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1742 a8083063 Iustin Pop
    """Deactivate the disks
1743 a8083063 Iustin Pop

1744 a8083063 Iustin Pop
    """
1745 a8083063 Iustin Pop
    instance = self.instance
1746 a8083063 Iustin Pop
    ins_l = rpc.call_instance_list([instance.primary_node])
1747 a8083063 Iustin Pop
    ins_l = ins_l[instance.primary_node]
1748 a8083063 Iustin Pop
    if not type(ins_l) is list:
1749 a8083063 Iustin Pop
      raise errors.OpExecError, ("Can't contact node '%s'" %
1750 a8083063 Iustin Pop
                                 instance.primary_node)
1751 a8083063 Iustin Pop
1752 a8083063 Iustin Pop
    if self.instance.name in ins_l:
1753 a8083063 Iustin Pop
      raise errors.OpExecError, ("Instance is running, can't shutdown"
1754 a8083063 Iustin Pop
                                 " block devices.")
1755 a8083063 Iustin Pop
1756 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
1757 a8083063 Iustin Pop
1758 a8083063 Iustin Pop
1759 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored;
  otherwise a shutdown failure on any node (primary or secondary)
  makes the function return False.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # secondary-node failures always count; primary-node failures
        # only count when ignore_primary is false
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1778 a8083063 Iustin Pop
1779 a8083063 Iustin Pop
1780 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and that its
    target bridges exist on the primary node.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      # modern call-style raise; the deprecated "raise E, arg" form is a
      # syntax error on newer Python and inconsistent with the rest of
      # the file
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
      raise errors.OpPrereqError("one or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, instance.primary_node))

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    Verifies free memory on the primary node, assembles the disks and
    asks the node to start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                freememory))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation before reporting the failure
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
1855 a8083063 Iustin Pop
1856 a8083063 Iustin Pop
1857 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      # modern call-style raise instead of the deprecated "raise E, arg"
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    A failed shutdown RPC is only logged (best effort); the instance is
    marked down and its disks deactivated regardless.

    """
    instance = self.instance
    node_current = instance.primary_node
    if not rpc.call_instance_shutdown(node_current, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
1900 a8083063 Iustin Pop
1901 a8083063 Iustin Pop
1902 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      # modern call-style raise instead of the deprecated "raise E, arg"
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # the reinstall opcode carries no 'pnode' attribute, so the old
        # "self.op.pnode" here would itself raise AttributeError; report
        # the instance's primary node instead
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
      if not isinstance(os_obj, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node" % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally switches the OS type, then runs the OS create scripts
    on activated disks.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s "
                                 "on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks, even when the OS install failed
      _ShutdownInstanceDisks(inst, self.cfg)
1979 fe7b0351 Michael Hanselmann
1980 fe7b0351 Michael Hanselmann
1981 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      # modern call-style raise instead of the deprecated "raise E, arg"
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its block devices and drops it
    from the cluster configuration.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                               (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    _RemoveDisks(instance, self.cfg)

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2032 a8083063 Iustin Pop
2033 a8083063 Iustin Pop
2034 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields that need live RPC data rather than configuration data
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list (one entry per instance, sorted by name) of lists of
    stringified field values, in the order of self.op.output_fields.

    """
    instance_names = utils.NiceSort(self.cfg.GetInstanceList())
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # live data requested: query every involved primary node
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # an explicit False marks an unreachable node
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = ",".join(instance.secondary_nodes) or "-"
        elif field == "admin_state":
          if instance.status == "down":
            val = "no"
          else:
            val = "yes"
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = "(node down)"
          else:
            if live_data.get(instance.name):
              val = "running"
            else:
              val = "stopped"
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = "(node down)"
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        else:
          # modern call-style raise instead of the deprecated "raise E, arg"
          raise errors.ParameterError(field)
        iout.append(str(val))
      output.append(iout)

    return output
2130 a8083063 Iustin Pop
2131 a8083063 Iustin Pop
2132 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    (secondary) node has enough memory and that the required bridges
    exist.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      # modern call-style raise; the deprecated "raise E, arg" form was
      # inconsistent with the new-style raise already used in Exec below
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check memory requirements on the secondary node
    target_node = instance.secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % nodeinfo)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existence
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
      raise errors.OpPrereqError("one or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, instance.primary_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # best effort: the failover continues even if the shutdown failed
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                   " anyway. Please make sure node %s is down"  %
                   (instance.name, source_node, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2254 a8083063 Iustin Pop
2255 a8083063 Iustin Pop
2256 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnPrimary(cfg, node, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices.

  """
  # children must exist before the parent device can be assembled on
  # top of them, so recurse first
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, child, info):
      return False

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
  if not new_id:
    return False
  # remember the physical id the node assigned, unless one is set already
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2274 a8083063 Iustin Pop
2275 a8083063 Iustin Pop
2276 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  # once any ancestor requires creation on secondaries, the whole
  # subtree below it must be created too
  create_here = force or device.CreateOnSecondary()

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, child, create_here, info):
      return False

  if not create_here:
    return True

  cfg.SetDiskID(device, node)
  new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
  if not new_id:
    return False
  # remember the physical id the node assigned, unless one is set already
  if device.physical_id is None:
    device.physical_id = new_id
  return True
2301 a8083063 Iustin Pop
2302 a8083063 Iustin Pop
2303 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2304 923b1523 Iustin Pop
  """Generate a suitable LV name.
2305 923b1523 Iustin Pop

2306 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2307 923b1523 Iustin Pop

2308 923b1523 Iustin Pop
  """
2309 923b1523 Iustin Pop
  results = []
2310 923b1523 Iustin Pop
  for val in exts:
2311 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2312 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2313 923b1523 Iustin Pop
  return results
2314 923b1523 Iustin Pop
2315 923b1523 Iustin Pop
2316 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd device complete with its children.

  """
  # the drbd device needs a cluster-wide unique port for the pair
  port = cfg.AllocatePort()
  vgname = cfg.GetVGName()
  # data LV of the requested size plus a fixed 128MB metadata LV
  data_dev = objects.Disk(dev_type="lvm", size=size,
                          logical_id=(vgname, names[0]))
  meta_dev = objects.Disk(dev_type="lvm", size=128,
                          logical_id=(vgname, names[1]))
  return objects.Disk(dev_type="drbd", size=size,
                      logical_id=(primary, secondary, port),
                      children=[data_dev, meta_dev])
2330 a8083063 Iustin Pop
2331 a8083063 Iustin Pop
2332 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Args:
    cfg: the configuration object (used for unique LV names and VG name)
    template_name: one of "diskless", "plain", "local_raid1" or
      "remote_raid1"
    instance_name: the instance name (currently unused in this function)
    primary_node: the primary node of the instance
    secondary_nodes: list of secondary nodes; must be empty for all
      templates except "remote_raid1", which requires exactly one
    disk_sz: size in MB of the data disk ("sda")
    swap_sz: size in MB of the swap disk ("sdb")

  Returns:
    A list of objects.Disk trees, one per instance disk.

  Raises:
    errors.ProgrammerError: if the secondary node count does not match
      the template, or the template name is unknown.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == "diskless":
    # no block devices at all
    disks = []
  elif template_name == "plain":
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # one plain LV per disk, directly exported as sda/sdb
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # each disk is an md_raid1 mirror over two local LVs (m1/m2)
    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == "remote_raid1":
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    # each disk is an md_raid1 device on top of a single drbd branch
    # (data LV + meta LV) mirrored between primary and remote node
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2395 a8083063 Iustin Pop
2396 a8083063 Iustin Pop
2397 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2398 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2399 a0c3fea1 Michael Hanselmann
2400 a0c3fea1 Michael Hanselmann
2401 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    # secondaries first, so the primary can attach to them afterwards
    #HARDCODE
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(cfg, snode, disk, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(cfg, instance.primary_node, disk, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False

  return True
2431 a8083063 Iustin Pop
2432 a8083063 Iustin Pop
2433 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`.  If some devices cannot be removed, the removal
  continues with the remaining ones (unlike `_CreateDisks()`, which
  aborts on the first failure).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for device in instance.disks:
    # walk every (node, device) pair in the device tree and remove each one
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      if not rpc.call_blockdev_remove(node, disk):
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_removed = False

  return all_removed
2460 a8083063 Iustin Pop
2461 a8083063 Iustin Pop
2462 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This validates the opcode parameters (creation mode, import
    source, primary/secondary nodes, disk template, free space, OS,
    instance name/IP, bridge) and computes the attributes used later
    by BuildHooksEnv and Exec.

    """
    # NOTE: raise statements use the call form, consistent with the
    # rest of this module (the old "raise Class, args" form is not
    # valid in Python 3)
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template == constants.DT_REMOTE_RAID1:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" %  self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        # report the node name, not the whole nodeinfo result dict
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
    if not isinstance(os_obj, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # instance verification
    hostname1 = utils.LookupHostname(self.op.instance_name)
    if not hostname1:
      raise errors.OpPrereqError("Instance name '%s' not found in dns" %
                                 self.op.instance_name)

    self.op.instance_name = instance_name = hostname1['hostname']
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1['ip']
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    # the instance's IP must not answer pings: fping succeeding means
    # the address is already in use
    command = ["fping", "-q", hostname1['ip']]
    result = utils.RunCmd(command)
    if not result.failed:
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                 (hostname1['ip'], instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj)
    elif iobj.disk_template == "remote_raid1":
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                           src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
2742 a8083063 Iustin Pop
2743 a8083063 Iustin Pop
2744 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      # call-form raise, consistent with the rest of the module and
      # valid in Python 3 (unlike the old "raise Class, args" form)
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    Returns a (node, console_cmd) tuple: the node to run the command
    on and the shell command that attaches to the console.

    """
    instance = self.instance
    node = instance.primary_node

    # ask the primary node which instances it is actually running
    node_insts = rpc.call_instance_list([node])[node]
    if node_insts is False:
      raise errors.OpExecError("Can't connect to node %s." % node)

    if instance.name not in node_insts:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name, node))

    hyper = hypervisor.GetHypervisor()
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
    return node, console_cmd
2786 a8083063 Iustin Pop
2787 a8083063 Iustin Pop
2788 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  This creates a new DRBD pair (primary/remote node) and attaches it as
  an additional child of an existing remote_raid1 (MD-over-DRBD) device.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  # mandatory opcode parameters
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hook node list: master + primary + new secondary + existing secondaries
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    node exists and is not the primary, that the disk layout is
    remote_raid1, that the named disk exists and that it does not
    already have two mirror children.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError, ("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError, ("The specified node is the primary node of"
                                   " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError, ("Instance's disk layout is not"
                                   " remote_raid1.")
    # locate the disk by its iv_name; the for/else raises if not found
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError, ("Can't find this device ('%s') in the"
                                   " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError, ("The device already has two slave"
                                   " devices.\n"
                                   "This would create a 3-disk raid1"
                                   " which we don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component

    Creates the new DRBD branch on the secondary first, then on the
    primary, then asks the primary to attach it to the MD device,
    rolling back the created block devices on failure.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    # generate unique LV names for the data and metadata volumes backing
    # the new DRBD device
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      # nothing was created yet on the primary, so no rollback needed
      raise errors.OpExecError, ("Failed to create new component on secondary"
                                 " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError, ("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchild(instance.primary_node,
                                           disk, new_drbd):
      # TODO: "compoment" typo in this log message (runtime string,
      # left unchanged here)
      logger.Error("Can't add mirror compoment to md!")
      # best-effort rollback on both nodes; failures are only logged
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError, "Can't add mirror component to md array"

    # record the new child in the configuration
    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    # wait until the new component is in sync with the mirror
    _WaitForSync(self.cfg, instance)

    return 0
2899 a8083063 Iustin Pop
2900 a8083063 Iustin Pop
2901 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  Detaches one DRBD child (identified by its port, opcode parameter
  'disk_id') from the MD mirror and removes its block devices from both
  nodes of the DRBD pair.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # mandatory opcode parameters
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses remote_raid1,
    that the named disk and the child with the given port exist, and
    that removing the child would not leave the mirror empty.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError, ("Instance's disk layout is not"
                                   " remote_raid1.")
    # locate the disk by iv_name; for/else raises when not found
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError, ("Can't find this device ('%s') in the"
                                   " instance." % self.op.disk_name)
    # locate the drbd child by port; logical_id[2] is the port field here
    for child in disk.children:
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
        break
    else:
      raise errors.OpPrereqError, ("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError, ("Cannot remove the last component from"
                                   " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id[0] and [1] are the two node names of the DRBD pair;
    # pick whichever one is not the primary as the old secondary
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    Detaches the child from the MD device on the primary, then removes
    its block devices on both nodes (best-effort) and updates the
    configuration.

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    if not rpc.call_blockdev_removechild(instance.primary_node,
                                              disk, child):
      raise errors.OpExecError, ("Can't remove child from mirror.")

    # remove the child's block devices on both nodes of the DRBD pair;
    # failures are logged but do not abort the operation
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)
2985 a8083063 Iustin Pop
2986 a8083063 Iustin Pop
2987 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  For a remote_raid1 instance, this adds a fresh DRBD branch (towards
  the given or current secondary) to every disk's mirror, waits for
  sync, verifies the devices, and then removes the old branches.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  # mandatory opcode parameters ('remote_node' is optional, see CheckPrereq)
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses remote_raid1
    with exactly one secondary, and resolves the optional target node
    (defaulting to the current secondary).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("Instance '%s' not known" %
                                   self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError, ("Instance's disk layout is not"
                                   " remote_raid1.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError, ("The instance has a strange layout,"
                                   " expected one secondary but found %d" %
                                   len(instance.secondary_nodes))

    # 'remote_node' is not in _OP_REQP, so it may be missing entirely
    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is None:
      # replace in place: use the current secondary
      remote_node = instance.secondary_nodes[0]
    else:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError, ("Node '%s' not known" %
                                     self.op.remote_node)
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError, ("The specified node is the primary node of"
                                   " the instance.")
    # store back the resolved name so BuildHooksEnv/Exec see it
    self.op.remote_node = remote_node

  def Exec(self, feedback_fn):
    """Replace the disks of an instance.

    Phase 1 attaches a new DRBD branch to every disk; phase 2 waits for
    sync and verifies nothing is degraded; phase 3 detaches and removes
    the old branches (best-effort).

    """
    instance = self.instance
    # maps disk iv_name -> (disk, old child, new drbd branch)
    iv_names = {}
    # start of work
    remote_node = self.op.remote_node
    cfg = self.cfg
    # NOTE(review): vgname is computed but never used in this method
    vgname = cfg.GetVGName()
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError, ("Failed to create new component on"
                                   " secondary node %s\n"
                                   "Full abort, cleanup manually!" %
                                   remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
                                        new_drbd):
        # best-effort rollback of the freshly created devices
        logger.Error("Can't add mirror compoment to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError, ("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      # index 5 of call_blockdev_find's result holds the degraded flag
      # (see the variable name) -- confirm against the backend if in doubt
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError, ("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError, ("New drbd device %s is degraded!" % name)

    # phase 3: detach and remove the old branches; errors here are
    # logged and skipped so the remaining disks are still processed
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechild(instance.primary_node,
                                                dev, child):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)
3137 a8083063 Iustin Pop
3138 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3139 a8083063 Iustin Pop
  """Query runtime instance data.
3140 a8083063 Iustin Pop

3141 a8083063 Iustin Pop
  """
3142 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3143 a8083063 Iustin Pop
3144 a8083063 Iustin Pop
  def CheckPrereq(self):
3145 a8083063 Iustin Pop
    """Check prerequisites.
3146 a8083063 Iustin Pop

3147 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3148 a8083063 Iustin Pop

3149 a8083063 Iustin Pop
    """
3150 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3151 a8083063 Iustin Pop
      raise errors.OpPrereqError, "Invalid argument type 'instances'"
3152 a8083063 Iustin Pop
    if self.op.instances:
3153 a8083063 Iustin Pop
      self.wanted_instances = []
3154 a8083063 Iustin Pop
      names = self.op.instances
3155 a8083063 Iustin Pop
      for name in names:
3156 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3157 a8083063 Iustin Pop
        if instance is None:
3158 a8083063 Iustin Pop
          raise errors.OpPrereqError, ("No such instance name '%s'" % name)
3159 a8083063 Iustin Pop
      self.wanted_instances.append(instance)
3160 a8083063 Iustin Pop
    else:
3161 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3162 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3163 a8083063 Iustin Pop
    return
3164 a8083063 Iustin Pop
3165 a8083063 Iustin Pop
3166 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3167 a8083063 Iustin Pop
    """Compute block device status.
3168 a8083063 Iustin Pop

3169 a8083063 Iustin Pop
    """
3170 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
3171 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3172 a8083063 Iustin Pop
    if dev.dev_type == "drbd":
3173 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
3174 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
3175 a8083063 Iustin Pop
        snode = dev.logical_id[1]
3176 a8083063 Iustin Pop
      else:
3177 a8083063 Iustin Pop
        snode = dev.logical_id[0]
3178 a8083063 Iustin Pop
3179 a8083063 Iustin Pop
    if snode:
3180 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
3181 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3182 a8083063 Iustin Pop
    else:
3183 a8083063 Iustin Pop
      dev_sstatus = None
3184 a8083063 Iustin Pop
3185 a8083063 Iustin Pop
    if dev.children:
3186 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3187 a8083063 Iustin Pop
                      for child in dev.children]
3188 a8083063 Iustin Pop
    else:
3189 a8083063 Iustin Pop
      dev_children = []
3190 a8083063 Iustin Pop
3191 a8083063 Iustin Pop
    data = {
3192 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
3193 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
3194 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
3195 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
3196 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
3197 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
3198 a8083063 Iustin Pop
      "children": dev_children,
3199 a8083063 Iustin Pop
      }
3200 a8083063 Iustin Pop
3201 a8083063 Iustin Pop
    return data
3202 a8083063 Iustin Pop
3203 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3204 a8083063 Iustin Pop
    """Gather and return data"""
3205 a8083063 Iustin Pop
    result = {}
3206 a8083063 Iustin Pop
    for instance in self.wanted_instances:
3207 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
3208 a8083063 Iustin Pop
                                                instance.name)
3209 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
3210 a8083063 Iustin Pop
        remote_state = "up"
3211 a8083063 Iustin Pop
      else:
3212 a8083063 Iustin Pop
        remote_state = "down"
3213 a8083063 Iustin Pop
      if instance.status == "down":
3214 a8083063 Iustin Pop
        config_state = "down"
3215 a8083063 Iustin Pop
      else:
3216 a8083063 Iustin Pop
        config_state = "up"
3217 a8083063 Iustin Pop
3218 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
3219 a8083063 Iustin Pop
               for device in instance.disks]
3220 a8083063 Iustin Pop
3221 a8083063 Iustin Pop
      idict = {
3222 a8083063 Iustin Pop
        "name": instance.name,
3223 a8083063 Iustin Pop
        "config_state": config_state,
3224 a8083063 Iustin Pop
        "run_state": remote_state,
3225 a8083063 Iustin Pop
        "pnode": instance.primary_node,
3226 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
3227 a8083063 Iustin Pop
        "os": instance.os,
3228 a8083063 Iustin Pop
        "memory": instance.memory,
3229 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
3230 a8083063 Iustin Pop
        "disks": disks,
3231 a8083063 Iustin Pop
        }
3232 a8083063 Iustin Pop
3233 a8083063 Iustin Pop
      result[instance.name] = idict
3234 a8083063 Iustin Pop
3235 a8083063 Iustin Pop
    return result
3236 a8083063 Iustin Pop
3237 a8083063 Iustin Pop
3238 a8083063 Iustin Pop
class LUQueryNodeData(NoHooksLU):
  """Logical unit for querying node data.

  For each requested node, returns its name, addresses and the names of
  the instances for which it is primary or secondary.

  """
  _OP_REQP = ["nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional node list against the existing names.

    """
    self.wanted_nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Compute and return the list of nodes.

    """
    # fetch all instance objects once, so the per-node scans below do
    # not hit the configuration repeatedly
    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    result = []
    for node in self.wanted_nodes:
      primary_names = [inst.name for inst in instances
                       if inst.primary_node == node.name]
      secondary_names = [inst.name for inst in instances
                         if node.name in inst.secondary_nodes]
      result.append((node.name, node.primary_ip, node.secondary_ip,
                     primary_names, secondary_names))
    return result
3267 a8083063 Iustin Pop
3268 a8083063 Iustin Pop
3269 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
  """Modifies an instances's parameters.

  Supported parameters (all optional opcode attributes): mem, vcpus,
  ip, bridge. Changes only touch the configuration; they take effect at
  the next instance restart.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # pass only the values actually being changed as overrides
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge:
      # the hook env wants the complete (ip, bridge) pair, so fill in
      # the unchanged half from the current first NIC
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      args['nics'] = [(ip, bridge)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.bridge = getattr(self.op, "bridge", None)
    # at least one parameter must have been submitted
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
      raise errors.OpPrereqError, ("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError, ("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError, ("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the literal string "none" clears the instance's IP
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError, ("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError, ("No such instance name '%s'" %
                                   self.op.instance_name)
    # normalize the opcode to the expanded instance name
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.
    """
    # collect (name, new_value) pairs for the changes actually applied
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    # NOTE(review): this tests self.bridge (truthiness) while CheckPrereq
    # computed self.do_bridge (is not None) -- equivalent unless an empty
    # bridge string is submitted; confirm intent before changing
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))

    self.cfg.AddInstance(instance)

    return result
3368 a8083063 Iustin Pop
3369 a8083063 Iustin Pop
3370 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # a missing/None 'nodes' attribute on the opcode means "all nodes";
    # _GetWantedNodes expands and validates the names for us
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    node_names = []
    for node in self.nodes:
      node_names.append(node.name)
    return rpc.call_export_list(node_names)
3392 a8083063 Iustin Pop
3393 a8083063 Iustin Pop
3394 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
3395 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
3396 a8083063 Iustin Pop

3397 a8083063 Iustin Pop
  """
3398 a8083063 Iustin Pop
  HPATH = "instance-export"
3399 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3400 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3401 a8083063 Iustin Pop
3402 a8083063 Iustin Pop
  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    # start with the export-specific keys...
    env = {}
    env["EXPORT_NODE"] = self.op.target_node
    env["EXPORT_DO_SHUTDOWN"] = self.op.shutdown
    # ...then merge in the per-instance environment (it takes precedence
    # on any key collision, same as the original env.update ordering)
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master, the instance's primary node and the export
    # target node; the same list is used for both pre- and post-hooks
    node_list = [self.sstore.GetMasterNode(),
                 self.instance.primary_node,
                 self.op.target_node]
    return env, node_list, node_list
3416 a8083063 Iustin Pop
3417 a8083063 Iustin Pop
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    Sets self.instance and self.dst_node, and canonicalizes
    self.op.target_node to the expanded node name.

    Raises:
      errors.OpPrereqError: if the instance or the destination node
        does not exist.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      # use call syntax instead of the deprecated "raise Exc, args" form
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # store the canonical (expanded) node name back on the opcode
    self.op.target_node = self.dst_node.name
3437 a8083063 Iustin Pop
3438 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Snapshots the instance's "sda" disk on its primary node (optionally
    shutting the instance down around the snapshot), copies the snapshot
    to the target node, finalizes the export there, and finally removes
    any older export of the same instance from the other cluster nodes.
    Individual step failures are logged but do not abort the export.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.processor.ChainOpCode(op, feedback_fn)

    vgname = self.cfg.GetVGName()

    # snapshot devices successfully created; only these get exported
    snap_disks = []

    try:
      # NOTE: only the disk named "sda" is snapshotted/exported here;
      # any other disks of the instance are not part of the export
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # log-and-continue: a failed snapshot just drops this disk
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            # wrap the snapshot LV in a Disk object so it can be exported
            # and removed with the standard blockdev RPCs
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if snapshotting failed, but only if we
      # were the ones who shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.processor.ChainOpCode(op, feedback_fn)

    # TODO: check for size

    for dev in snap_disks:
      # copy each snapshot to the target node, then drop the snapshot LV
      # from the source node regardless of whether the copy succeeded
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    # write the export metadata on the target node
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
      for node in exportlist:
        # keep only the freshly-created export: remove any stale export of
        # this instance found on the other nodes (best-effort, logged)
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))