lib/cmdlib.py @ revision bcf043c9

1 a8083063 Iustin Pop
#!/usr/bin/python
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import socket
30 a8083063 Iustin Pop
import time
31 a8083063 Iustin Pop
import tempfile
32 a8083063 Iustin Pop
import re
33 a8083063 Iustin Pop
import platform
34 a8083063 Iustin Pop
35 a8083063 Iustin Pop
from ganeti import rpc
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 a8083063 Iustin Pop
from ganeti import config
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 a8083063 Iustin Pop
from ganeti import ssconf
46 a8083063 Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
48 396e1b78 Michael Hanselmann
  """Logical Unit base class.
49 a8083063 Iustin Pop

50 a8083063 Iustin Pop
  Subclasses must follow these rules:
51 a8083063 Iustin Pop
    - implement CheckPrereq which also fills in the opcode instance
52 a8083063 Iustin Pop
      with all the fields (even if as None)
53 a8083063 Iustin Pop
    - implement Exec
54 a8083063 Iustin Pop
    - implement BuildHooksEnv
55 a8083063 Iustin Pop
    - redefine HPATH and HTYPE
56 a8083063 Iustin Pop
    - optionally redefine their run requirements (REQ_CLUSTER,
57 a8083063 Iustin Pop
      REQ_MASTER); note that all commands require root permissions
58 a8083063 Iustin Pop

59 a8083063 Iustin Pop
  """
60 a8083063 Iustin Pop
  HPATH = None
61 a8083063 Iustin Pop
  HTYPE = None
62 a8083063 Iustin Pop
  _OP_REQP = []
63 a8083063 Iustin Pop
  REQ_CLUSTER = True
64 a8083063 Iustin Pop
  REQ_MASTER = True
65 a8083063 Iustin Pop
66 a8083063 Iustin Pop
  def __init__(self, processor, op, cfg, sstore):
67 a8083063 Iustin Pop
    """Constructor for LogicalUnit.
68 a8083063 Iustin Pop

69 a8083063 Iustin Pop
    This needs to be overriden in derived classes in order to check op
70 a8083063 Iustin Pop
    validity.
71 a8083063 Iustin Pop

72 a8083063 Iustin Pop
    """
73 a8083063 Iustin Pop
    self.processor = processor
74 a8083063 Iustin Pop
    self.op = op
75 a8083063 Iustin Pop
    self.cfg = cfg
76 a8083063 Iustin Pop
    self.sstore = sstore
77 a8083063 Iustin Pop
    for attr_name in self._OP_REQP:
78 a8083063 Iustin Pop
      attr_val = getattr(op, attr_name, None)
79 a8083063 Iustin Pop
      if attr_val is None:
80 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Required parameter '%s' missing" %
81 3ecf6786 Iustin Pop
                                   attr_name)
82 a8083063 Iustin Pop
    if self.REQ_CLUSTER:
83 a8083063 Iustin Pop
      if not cfg.IsCluster():
84 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Cluster not initialized yet,"
85 3ecf6786 Iustin Pop
                                   " use 'gnt-cluster init' first.")
86 a8083063 Iustin Pop
      if self.REQ_MASTER:
87 880478f8 Iustin Pop
        master = sstore.GetMasterNode()
88 a8083063 Iustin Pop
        if master != socket.gethostname():
89 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Commands must be run on the master"
90 3ecf6786 Iustin Pop
                                     " node %s" % master)
91 a8083063 Iustin Pop
92 a8083063 Iustin Pop
  def CheckPrereq(self):
93 a8083063 Iustin Pop
    """Check prerequisites for this LU.
94 a8083063 Iustin Pop

95 a8083063 Iustin Pop
    This method should check that the prerequisites for the execution
96 a8083063 Iustin Pop
    of this LU are fulfilled. It can do internode communication, but
97 a8083063 Iustin Pop
    it should be idempotent - no cluster or system changes are
98 a8083063 Iustin Pop
    allowed.
99 a8083063 Iustin Pop

100 a8083063 Iustin Pop
    The method should raise errors.OpPrereqError in case something is
101 a8083063 Iustin Pop
    not fulfilled. Its return value is ignored.
102 a8083063 Iustin Pop

103 a8083063 Iustin Pop
    This method should also update all the parameters of the opcode to
104 a8083063 Iustin Pop
    their canonical form; e.g. a short node name must be fully
105 a8083063 Iustin Pop
    expanded after this method has successfully completed (so that
106 a8083063 Iustin Pop
    hooks, logging, etc. work correctly).
107 a8083063 Iustin Pop

108 a8083063 Iustin Pop
    """
109 a8083063 Iustin Pop
    raise NotImplementedError
110 a8083063 Iustin Pop
111 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
112 a8083063 Iustin Pop
    """Execute the LU.
113 a8083063 Iustin Pop

114 a8083063 Iustin Pop
    This method should implement the actual work. It should raise
115 a8083063 Iustin Pop
    errors.OpExecError for failures that are somewhat dealt with in
116 a8083063 Iustin Pop
    code, or expected.
117 a8083063 Iustin Pop

118 a8083063 Iustin Pop
    """
119 a8083063 Iustin Pop
    raise NotImplementedError
120 a8083063 Iustin Pop
121 a8083063 Iustin Pop
  def BuildHooksEnv(self):
122 a8083063 Iustin Pop
    """Build hooks environment for this LU.
123 a8083063 Iustin Pop

124 a8083063 Iustin Pop
    This method should return a three-element tuple consisting of: a dict
125 a8083063 Iustin Pop
    containing the environment that will be used for running the
126 a8083063 Iustin Pop
    specific hook for this LU, a list of node names on which the hook
127 a8083063 Iustin Pop
    should run before the execution, and a list of node names on which
128 a8083063 Iustin Pop
    the hook should run after the execution.
129 a8083063 Iustin Pop

130 a8083063 Iustin Pop
    The keys of the dict must not be prefixed with 'GANETI_', as this will
131 a8083063 Iustin Pop
    be handled by the hooks runner. Also note additional keys will be
132 a8083063 Iustin Pop
    added by the hooks runner. If the LU doesn't define any
133 a8083063 Iustin Pop
    environment, an empty dict (and not None) should be returned.
134 a8083063 Iustin Pop

135 a8083063 Iustin Pop
    As for the node lists, the master should not be included in
136 a8083063 Iustin Pop
    them, as it will be added by the hooks runner in case this LU
137 a8083063 Iustin Pop
    requires a cluster to run on (otherwise we don't have a node
138 a8083063 Iustin Pop
    list). If there are no nodes, an empty list should be returned (and
139 a8083063 Iustin Pop
    not None).
140 a8083063 Iustin Pop

141 a8083063 Iustin Pop
    Note that if the HPATH for a LU class is None, this function will
142 a8083063 Iustin Pop
    not be called.
143 a8083063 Iustin Pop

144 a8083063 Iustin Pop
    """
145 a8083063 Iustin Pop
    raise NotImplementedError
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
148 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
149 a8083063 Iustin Pop
  """Simple LU which runs no hooks.
150 a8083063 Iustin Pop

151 a8083063 Iustin Pop
  This LU is intended as a parent for other LogicalUnits which will
152 a8083063 Iustin Pop
  run no hooks, in order to reduce duplicate code.
153 a8083063 Iustin Pop

154 a8083063 Iustin Pop
  """
155 a8083063 Iustin Pop
  HPATH = None
156 a8083063 Iustin Pop
  HTYPE = None
157 a8083063 Iustin Pop
158 a8083063 Iustin Pop
  def BuildHooksEnv(self):
159 a8083063 Iustin Pop
    """Build hooks env.
160 a8083063 Iustin Pop

161 a8083063 Iustin Pop
    This is a no-op, since we don't run hooks.
162 a8083063 Iustin Pop

163 a8083063 Iustin Pop
    """
164 a8083063 Iustin Pop
    return
165 a8083063 Iustin Pop
166 a8083063 Iustin Pop
167 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
168 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded node names.
169 83120a01 Michael Hanselmann

170 83120a01 Michael Hanselmann
  Args:
171 83120a01 Michael Hanselmann
    nodes: List of nodes (strings) or None for all
172 83120a01 Michael Hanselmann

173 83120a01 Michael Hanselmann
  """
174 3312b702 Iustin Pop
  if not isinstance(nodes, list):
175 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'nodes'")
176 dcb93971 Michael Hanselmann
177 dcb93971 Michael Hanselmann
  if nodes:
178 3312b702 Iustin Pop
    wanted = []
179 dcb93971 Michael Hanselmann
180 dcb93971 Michael Hanselmann
    for name in nodes:
181 a7ba5e53 Iustin Pop
      node = lu.cfg.ExpandNodeName(name)
182 dcb93971 Michael Hanselmann
      if node is None:
183 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No such node name '%s'" % name)
184 3312b702 Iustin Pop
      wanted.append(node)
185 dcb93971 Michael Hanselmann
186 dcb93971 Michael Hanselmann
  else:
187 a7ba5e53 Iustin Pop
    wanted = lu.cfg.GetNodeList()
188 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
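  # Illustrative usage (not part of the original code), typically from an
  # LU's CheckPrereq:
  #
  #   self.wanted = _GetWantedNodes(self, self.op.nodes)
  #
  # an empty list selects every node in the cluster, while unknown node
  # names raise OpPrereqError before anything is executed.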
189 3312b702 Iustin Pop
190 3312b702 Iustin Pop
191 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
192 a7ba5e53 Iustin Pop
  """Returns list of checked and expanded instance names.
193 3312b702 Iustin Pop

194 3312b702 Iustin Pop
  Args:
195 3312b702 Iustin Pop
    instances: List of instances (strings) or None for all
196 3312b702 Iustin Pop

197 3312b702 Iustin Pop
  """
198 3312b702 Iustin Pop
  if not isinstance(instances, list):
199 3312b702 Iustin Pop
    raise errors.OpPrereqError("Invalid argument type 'instances'")
200 3312b702 Iustin Pop
201 3312b702 Iustin Pop
  if instances:
202 3312b702 Iustin Pop
    wanted = []
203 3312b702 Iustin Pop
204 3312b702 Iustin Pop
    for name in instances:
205 a7ba5e53 Iustin Pop
      instance = lu.cfg.ExpandInstanceName(name)
206 3312b702 Iustin Pop
      if instance is None:
207 3312b702 Iustin Pop
        raise errors.OpPrereqError("No such instance name '%s'" % name)
208 3312b702 Iustin Pop
      wanted.append(instance)
209 3312b702 Iustin Pop
210 3312b702 Iustin Pop
  else:
211 a7ba5e53 Iustin Pop
    wanted = lu.cfg.GetInstanceList()
212 a7ba5e53 Iustin Pop
  return utils.NiceSort(wanted)
213 dcb93971 Michael Hanselmann
214 dcb93971 Michael Hanselmann
215 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
216 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
217 83120a01 Michael Hanselmann

218 83120a01 Michael Hanselmann
  Args:
219 83120a01 Michael Hanselmann
    static: Static fields
220 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
221 83120a01 Michael Hanselmann

222 83120a01 Michael Hanselmann
  """
223 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
224 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
225 dcb93971 Michael Hanselmann
226 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
227 dcb93971 Michael Hanselmann
228 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
229 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
230 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
231 3ecf6786 Iustin Pop
                                          difference(all_fields)))
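  # Illustrative usage (not part of the original code), with hypothetical
  # field names:
  #
  #   _CheckOutputFields(static=["name", "pip", "sip"],
  #                      dynamic=["dfree", "dtotal"],
  #                      selected=self.op.output_fields)
  #
  # which raises OpPrereqError naming any selected field that is neither
  # static nor dynamic, and returns None when everything is known.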
232 dcb93971 Michael Hanselmann
233 dcb93971 Michael Hanselmann
234 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
235 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
236 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
237 ecb215b5 Michael Hanselmann

238 ecb215b5 Michael Hanselmann
  Args:
239 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
240 396e1b78 Michael Hanselmann
  """
241 396e1b78 Michael Hanselmann
  env = {
242 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
243 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
244 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
245 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
246 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
247 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
248 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
249 396e1b78 Michael Hanselmann
  }
250 396e1b78 Michael Hanselmann
251 396e1b78 Michael Hanselmann
  if nics:
252 396e1b78 Michael Hanselmann
    nic_count = len(nics)
253 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
254 396e1b78 Michael Hanselmann
      if ip is None:
255 396e1b78 Michael Hanselmann
        ip = ""
256 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
257 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
258 396e1b78 Michael Hanselmann
  else:
259 396e1b78 Michael Hanselmann
    nic_count = 0
260 396e1b78 Michael Hanselmann
261 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
262 396e1b78 Michael Hanselmann
263 396e1b78 Michael Hanselmann
  return env
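  # Illustrative sketch (not part of the original code): for a
  # hypothetical single-NIC instance the resulting dict would look
  # roughly like
  #
  #   {
  #     "INSTANCE_NAME": "inst1.example.com",
  #     "INSTANCE_PRIMARY": "node1.example.com",
  #     "INSTANCE_SECONDARIES": "node2.example.com",
  #     "INSTANCE_OS_TYPE": "debian-etch",
  #     "INSTANCE_STATUS": "up",
  #     "INSTANCE_MEMORY": 512,
  #     "INSTANCE_VCPUS": 1,
  #     "INSTANCE_NIC0_IP": "",
  #     "INSTANCE_NIC0_BRIDGE": "xen-br0",
  #     "INSTANCE_NIC_COUNT": 1,
  #   }
  #
  # the GANETI_ prefix is added later by the hooks runner.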
264 396e1b78 Michael Hanselmann
265 396e1b78 Michael Hanselmann
266 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
267 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from an object.
268 ecb215b5 Michael Hanselmann

269 ecb215b5 Michael Hanselmann
  Args:
270 ecb215b5 Michael Hanselmann
    instance: objects.Instance object of instance
271 ecb215b5 Michael Hanselmann
    override: dict of values to override
272 ecb215b5 Michael Hanselmann
  """
273 396e1b78 Michael Hanselmann
  args = {
274 396e1b78 Michael Hanselmann
    'name': instance.name,
275 396e1b78 Michael Hanselmann
    'primary_node': instance.primary_node,
276 396e1b78 Michael Hanselmann
    'secondary_nodes': instance.secondary_nodes,
277 ecb215b5 Michael Hanselmann
    'os_type': instance.os,
278 396e1b78 Michael Hanselmann
    'status': instance.status,
279 396e1b78 Michael Hanselmann
    'memory': instance.memory,
280 396e1b78 Michael Hanselmann
    'vcpus': instance.vcpus,
281 396e1b78 Michael Hanselmann
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
282 396e1b78 Michael Hanselmann
  }
283 396e1b78 Michael Hanselmann
  if override:
284 396e1b78 Michael Hanselmann
    args.update(override)
285 396e1b78 Michael Hanselmann
  return _BuildInstanceHookEnv(**args)
286 396e1b78 Michael Hanselmann
287 396e1b78 Michael Hanselmann
288 a8083063 Iustin Pop
def _UpdateEtcHosts(fullnode, ip):
289 a8083063 Iustin Pop
  """Ensure a node has a correct entry in /etc/hosts.
290 a8083063 Iustin Pop

291 a8083063 Iustin Pop
  Args:
292 a8083063 Iustin Pop
    fullnode - Fully qualified domain name of host. (str)
293 a8083063 Iustin Pop
    ip       - IPv4 address of host (str)
294 a8083063 Iustin Pop

295 a8083063 Iustin Pop
  """
296 a8083063 Iustin Pop
  node = fullnode.split(".", 1)[0]
297 a8083063 Iustin Pop
298 a8083063 Iustin Pop
  f = open('/etc/hosts', 'r+')
299 a8083063 Iustin Pop
300 a8083063 Iustin Pop
  inthere = False
301 a8083063 Iustin Pop
302 a8083063 Iustin Pop
  save_lines = []
303 a8083063 Iustin Pop
  add_lines = []
304 a8083063 Iustin Pop
  removed = False
305 a8083063 Iustin Pop
306 a8083063 Iustin Pop
  while True:
307 a8083063 Iustin Pop
    rawline = f.readline()
308 a8083063 Iustin Pop
309 a8083063 Iustin Pop
    if not rawline:
310 a8083063 Iustin Pop
      # End of file
311 a8083063 Iustin Pop
      break
312 a8083063 Iustin Pop
313 a8083063 Iustin Pop
    line = rawline.split('\n')[0]
314 a8083063 Iustin Pop
315 a8083063 Iustin Pop
    # Strip off comments
316 a8083063 Iustin Pop
    line = line.split('#')[0]
317 a8083063 Iustin Pop
318 a8083063 Iustin Pop
    if not line:
319 a8083063 Iustin Pop
      # Entire line was comment, skip
320 a8083063 Iustin Pop
      save_lines.append(rawline)
321 a8083063 Iustin Pop
      continue
322 a8083063 Iustin Pop
323 a8083063 Iustin Pop
    fields = line.split()
324 a8083063 Iustin Pop
325 a8083063 Iustin Pop
    haveall = True
326 a8083063 Iustin Pop
    havesome = False
327 a8083063 Iustin Pop
    for spec in [ ip, fullnode, node ]:
328 a8083063 Iustin Pop
      if spec not in fields:
329 a8083063 Iustin Pop
        haveall = False
330 a8083063 Iustin Pop
      if spec in fields:
331 a8083063 Iustin Pop
        havesome = True
332 a8083063 Iustin Pop
333 a8083063 Iustin Pop
    if haveall:
334 a8083063 Iustin Pop
      inthere = True
335 a8083063 Iustin Pop
      save_lines.append(rawline)
336 a8083063 Iustin Pop
      continue
337 a8083063 Iustin Pop
338 a8083063 Iustin Pop
    if havesome and not haveall:
339 a8083063 Iustin Pop
      # Line (old, or manual?) which is missing some.  Remove.
340 a8083063 Iustin Pop
      removed = True
341 a8083063 Iustin Pop
      continue
342 a8083063 Iustin Pop
343 a8083063 Iustin Pop
    save_lines.append(rawline)
344 a8083063 Iustin Pop
345 a8083063 Iustin Pop
  if not inthere:
346 a8083063 Iustin Pop
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))
347 a8083063 Iustin Pop
348 a8083063 Iustin Pop
  if removed:
349 a8083063 Iustin Pop
    if add_lines:
350 a8083063 Iustin Pop
      save_lines = save_lines + add_lines
351 a8083063 Iustin Pop
352 a8083063 Iustin Pop
    # We removed a line, write a new file and replace old.
353 a8083063 Iustin Pop
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
354 a8083063 Iustin Pop
    newfile = os.fdopen(fd, 'w')
355 a8083063 Iustin Pop
    newfile.write(''.join(save_lines))
356 a8083063 Iustin Pop
    newfile.close()
357 a8083063 Iustin Pop
    os.rename(tmpname, '/etc/hosts')
358 a8083063 Iustin Pop
359 a8083063 Iustin Pop
  elif add_lines:
360 a8083063 Iustin Pop
    # Simply appending a new line will do the trick.
361 a8083063 Iustin Pop
    f.seek(0, 2)
362 a8083063 Iustin Pop
    for add in add_lines:
363 a8083063 Iustin Pop
      f.write(add)
364 a8083063 Iustin Pop
365 a8083063 Iustin Pop
  f.close()
366 a8083063 Iustin Pop
367 a8083063 Iustin Pop
368 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
369 a8083063 Iustin Pop
  """Ensure a node has a correct known_hosts entry.
370 a8083063 Iustin Pop

371 a8083063 Iustin Pop
  Args:
372 a8083063 Iustin Pop
    fullnode - Fully qualified domain name of host. (str)
373 a8083063 Iustin Pop
    ip       - IPv4 address of host (str)
374 a8083063 Iustin Pop
    pubkey   - the public key of the cluster
375 a8083063 Iustin Pop

376 a8083063 Iustin Pop
  """
377 82122173 Iustin Pop
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
378 82122173 Iustin Pop
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
379 a8083063 Iustin Pop
  else:
380 82122173 Iustin Pop
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')
381 a8083063 Iustin Pop
382 a8083063 Iustin Pop
  inthere = False
383 a8083063 Iustin Pop
384 a8083063 Iustin Pop
  save_lines = []
385 a8083063 Iustin Pop
  add_lines = []
386 a8083063 Iustin Pop
  removed = False
387 a8083063 Iustin Pop
388 a8083063 Iustin Pop
  while True:
389 a8083063 Iustin Pop
    rawline = f.readline()
390 a8083063 Iustin Pop
    logger.Debug('read %s' % (repr(rawline),))
391 a8083063 Iustin Pop
392 a8083063 Iustin Pop
    if not rawline:
393 a8083063 Iustin Pop
      # End of file
394 a8083063 Iustin Pop
      break
395 a8083063 Iustin Pop
396 a8083063 Iustin Pop
    line = rawline.split('\n')[0]
397 a8083063 Iustin Pop
398 a8083063 Iustin Pop
    parts = line.split(' ')
399 a8083063 Iustin Pop
    fields = parts[0].split(',')
400 a8083063 Iustin Pop
    key = parts[2]
401 a8083063 Iustin Pop
402 a8083063 Iustin Pop
    haveall = True
403 a8083063 Iustin Pop
    havesome = False
404 a8083063 Iustin Pop
    for spec in [ ip, fullnode ]:
405 a8083063 Iustin Pop
      if spec not in fields:
406 a8083063 Iustin Pop
        haveall = False
407 a8083063 Iustin Pop
      if spec in fields:
408 a8083063 Iustin Pop
        havesome = True
409 a8083063 Iustin Pop
410 a8083063 Iustin Pop
    logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
411 a8083063 Iustin Pop
    if haveall and key == pubkey:
412 a8083063 Iustin Pop
      inthere = True
413 a8083063 Iustin Pop
      save_lines.append(rawline)
414 a8083063 Iustin Pop
      logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
415 a8083063 Iustin Pop
      continue
416 a8083063 Iustin Pop
417 a8083063 Iustin Pop
    if havesome and (not haveall or key != pubkey):
418 a8083063 Iustin Pop
      removed = True
419 a8083063 Iustin Pop
      logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
420 a8083063 Iustin Pop
      continue
421 a8083063 Iustin Pop
422 a8083063 Iustin Pop
    save_lines.append(rawline)
423 a8083063 Iustin Pop
424 a8083063 Iustin Pop
  if not inthere:
425 a8083063 Iustin Pop
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
426 a8083063 Iustin Pop
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))
427 a8083063 Iustin Pop
428 a8083063 Iustin Pop
  if removed:
429 a8083063 Iustin Pop
    save_lines = save_lines + add_lines
430 a8083063 Iustin Pop
431 a8083063 Iustin Pop
    # Write a new file and replace old.
432 82122173 Iustin Pop
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
433 82122173 Iustin Pop
                                   constants.DATA_DIR)
434 a8083063 Iustin Pop
    newfile = os.fdopen(fd, 'w')
435 82122173 Iustin Pop
    try:
436 82122173 Iustin Pop
      newfile.write(''.join(save_lines))
437 82122173 Iustin Pop
    finally:
438 82122173 Iustin Pop
      newfile.close()
439 a8083063 Iustin Pop
    logger.Debug("Wrote new known_hosts.")
440 82122173 Iustin Pop
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)
441 a8083063 Iustin Pop
442 a8083063 Iustin Pop
  elif add_lines:
443 a8083063 Iustin Pop
    # Simply appending a new line will do the trick.
444 a8083063 Iustin Pop
    f.seek(0, 2)
445 a8083063 Iustin Pop
    for add in add_lines:
446 a8083063 Iustin Pop
      f.write(add)
447 a8083063 Iustin Pop
448 a8083063 Iustin Pop
  f.close()
449 a8083063 Iustin Pop
450 a8083063 Iustin Pop
451 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
452 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
453 a8083063 Iustin Pop

454 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
455 a8083063 Iustin Pop
  is the error message.
456 a8083063 Iustin Pop

457 a8083063 Iustin Pop
  """
458 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
459 a8083063 Iustin Pop
  if vgsize is None:
460 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
461 a8083063 Iustin Pop
  elif vgsize < 20480:
462 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
463 191a8385 Guido Trotter
            (vgname, vgsize))
464 a8083063 Iustin Pop
  return None
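  # Illustrative usage (not part of the original code), mirroring the
  # call in LUInitCluster.CheckPrereq below:
  #
  #   vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
  #   if vgstatus:
  #     raise errors.OpPrereqError("Error: %s" % vgstatus)
  #
  # a None result means the volume group exists and holds at least
  # 20480 MiB.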
465 a8083063 Iustin Pop
466 a8083063 Iustin Pop
467 a8083063 Iustin Pop
def _InitSSHSetup(node):
468 a8083063 Iustin Pop
  """Setup the SSH configuration for the cluster.
469 a8083063 Iustin Pop

470 a8083063 Iustin Pop

471 a8083063 Iustin Pop
  This generates a dsa keypair for root, adds the pub key to the
472 a8083063 Iustin Pop
  permitted hosts and adds the hostkey to its own known hosts.
473 a8083063 Iustin Pop

474 a8083063 Iustin Pop
  Args:
475 a8083063 Iustin Pop
    node: the name of this host as a fqdn
476 a8083063 Iustin Pop

477 a8083063 Iustin Pop
  """
478 a8083063 Iustin Pop
  if os.path.exists('/root/.ssh/id_dsa'):
479 a8083063 Iustin Pop
    utils.CreateBackup('/root/.ssh/id_dsa')
480 a8083063 Iustin Pop
  if os.path.exists('/root/.ssh/id_dsa.pub'):
481 a8083063 Iustin Pop
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
482 a8083063 Iustin Pop
483 a8083063 Iustin Pop
  utils.RemoveFile('/root/.ssh/id_dsa')
484 a8083063 Iustin Pop
  utils.RemoveFile('/root/.ssh/id_dsa.pub')
485 a8083063 Iustin Pop
486 a8083063 Iustin Pop
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
487 a8083063 Iustin Pop
                         "-f", "/root/.ssh/id_dsa",
488 a8083063 Iustin Pop
                         "-q", "-N", ""])
489 a8083063 Iustin Pop
  if result.failed:
490 3ecf6786 Iustin Pop
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
491 3ecf6786 Iustin Pop
                             result.output)
492 a8083063 Iustin Pop
493 a8083063 Iustin Pop
  f = open('/root/.ssh/id_dsa.pub', 'r')
494 a8083063 Iustin Pop
  try:
495 a8083063 Iustin Pop
    utils.AddAuthorizedKey('/root/.ssh/authorized_keys', f.read(8192))
496 a8083063 Iustin Pop
  finally:
497 a8083063 Iustin Pop
    f.close()
498 a8083063 Iustin Pop
499 a8083063 Iustin Pop
500 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
501 a8083063 Iustin Pop
  """Setup the necessary configuration for the initial node daemon.
502 a8083063 Iustin Pop

503 a8083063 Iustin Pop
  This creates the nodepass file containing the shared password for
504 a8083063 Iustin Pop
  the cluster and also generates the SSL certificate.
505 a8083063 Iustin Pop

506 a8083063 Iustin Pop
  """
507 a8083063 Iustin Pop
  # Create pseudo random password
508 a8083063 Iustin Pop
  randpass = sha.new(os.urandom(64)).hexdigest()
509 a8083063 Iustin Pop
  # and write it into sstore
510 a8083063 Iustin Pop
  ss.SetKey(ss.SS_NODED_PASS, randpass)
511 a8083063 Iustin Pop
512 a8083063 Iustin Pop
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
513 a8083063 Iustin Pop
                         "-days", str(365*5), "-nodes", "-x509",
514 a8083063 Iustin Pop
                         "-keyout", constants.SSL_CERT_FILE,
515 a8083063 Iustin Pop
                         "-out", constants.SSL_CERT_FILE, "-batch"])
516 a8083063 Iustin Pop
  if result.failed:
517 3ecf6786 Iustin Pop
    raise errors.OpExecError("could not generate server ssl cert, command"
518 3ecf6786 Iustin Pop
                             " %s had exitcode %s and error message %s" %
519 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
520 a8083063 Iustin Pop
521 a8083063 Iustin Pop
  os.chmod(constants.SSL_CERT_FILE, 0400)
522 a8083063 Iustin Pop
523 a8083063 Iustin Pop
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
524 a8083063 Iustin Pop
525 a8083063 Iustin Pop
  if result.failed:
526 3ecf6786 Iustin Pop
    raise errors.OpExecError("Could not start the node daemon, command %s"
527 3ecf6786 Iustin Pop
                             " had exitcode %s and error %s" %
528 3ecf6786 Iustin Pop
                             (result.cmd, result.exit_code, result.output))
529 a8083063 Iustin Pop
530 a8083063 Iustin Pop
531 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
532 a8083063 Iustin Pop
  """Initialise the cluster.
533 a8083063 Iustin Pop

534 a8083063 Iustin Pop
  """
535 a8083063 Iustin Pop
  HPATH = "cluster-init"
536 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
537 a8083063 Iustin Pop
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
538 880478f8 Iustin Pop
              "def_bridge", "master_netdev"]
539 a8083063 Iustin Pop
  REQ_CLUSTER = False
540 a8083063 Iustin Pop
541 a8083063 Iustin Pop
  def BuildHooksEnv(self):
542 a8083063 Iustin Pop
    """Build hooks env.
543 a8083063 Iustin Pop

544 a8083063 Iustin Pop
    Notes: Since we don't require a cluster, we must manually add
545 a8083063 Iustin Pop
    ourselves to the post-run node list.
546 a8083063 Iustin Pop

547 a8083063 Iustin Pop
    """
548 396e1b78 Michael Hanselmann
    env = {
549 396e1b78 Michael Hanselmann
      "CLUSTER": self.op.cluster_name,
550 bcf043c9 Iustin Pop
      "MASTER": self.hostname.name,
551 396e1b78 Michael Hanselmann
      }
552 bcf043c9 Iustin Pop
    return env, [], [self.hostname.name]
553 a8083063 Iustin Pop
554 a8083063 Iustin Pop
  def CheckPrereq(self):
555 a8083063 Iustin Pop
    """Verify that the passed name is a valid one.
556 a8083063 Iustin Pop

557 a8083063 Iustin Pop
    """
558 a8083063 Iustin Pop
    if config.ConfigWriter.IsCluster():
559 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cluster is already initialised")
560 a8083063 Iustin Pop
561 a8083063 Iustin Pop
    hostname_local = socket.gethostname()
562 a8083063 Iustin Pop
    self.hostname = hostname = utils.LookupHostname(hostname_local)
563 a8083063 Iustin Pop
    if not hostname:
564 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot resolve my own hostname ('%s')" %
565 3ecf6786 Iustin Pop
                                 hostname_local)
566 a8083063 Iustin Pop
567 bcf043c9 Iustin Pop
    if hostname.name != hostname_local:
568 ff98055b Iustin Pop
      raise errors.OpPrereqError("My own hostname (%s) does not match the"
569 ff98055b Iustin Pop
                                 " resolver (%s): probably not using FQDN"
570 ff98055b Iustin Pop
                                 " for hostname." %
571 bcf043c9 Iustin Pop
                                 (hostname_local, hostname.name))
572 ff98055b Iustin Pop
573 bcf043c9 Iustin Pop
    if hostname.ip.startswith("127."):
574 130e907e Iustin Pop
      raise errors.OpPrereqError("This host's IP resolves to the private"
575 130e907e Iustin Pop
                                 " range (%s). Please fix DNS or /etc/hosts." %
576 bcf043c9 Iustin Pop
                                 (hostname.ip,))
577 130e907e Iustin Pop
578 a8083063 Iustin Pop
    self.clustername = clustername = utils.LookupHostname(self.op.cluster_name)
579 a8083063 Iustin Pop
    if not clustername:
580 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot resolve given cluster name ('%s')"
581 3ecf6786 Iustin Pop
                                 % self.op.cluster_name)
582 a8083063 Iustin Pop
583 bcf043c9 Iustin Pop
    result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", hostname.ip])
584 a8083063 Iustin Pop
    if result.failed:
585 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
586 3ecf6786 Iustin Pop
                                 " to %s,\nbut this ip address does not"
587 3ecf6786 Iustin Pop
                                 " belong to this host."
588 bcf043c9 Iustin Pop
                                 " Aborting." % hostname.ip)
589 a8083063 Iustin Pop
590 a8083063 Iustin Pop
    secondary_ip = getattr(self.op, "secondary_ip", None)
591 a8083063 Iustin Pop
    if secondary_ip and not utils.IsValidIP(secondary_ip):
592 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid secondary ip given")
593 bcf043c9 Iustin Pop
    if secondary_ip and secondary_ip != hostname.ip:
594 a8083063 Iustin Pop
      result = utils.RunCmd(["fping", "-S127.0.0.1", "-q", secondary_ip])
595 a8083063 Iustin Pop
      if result.failed:
596 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("You gave %s as secondary IP,\n"
597 3ecf6786 Iustin Pop
                                   "but it does not belong to this host." %
598 3ecf6786 Iustin Pop
                                   secondary_ip)
599 a8083063 Iustin Pop
    self.secondary_ip = secondary_ip
600 a8083063 Iustin Pop
601 a8083063 Iustin Pop
    # checks presence of the volume group given
602 a8083063 Iustin Pop
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)
603 a8083063 Iustin Pop
604 a8083063 Iustin Pop
    if vgstatus:
605 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Error: %s" % vgstatus)
606 a8083063 Iustin Pop
607 a8083063 Iustin Pop
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
608 a8083063 Iustin Pop
                    self.op.mac_prefix):
609 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
610 3ecf6786 Iustin Pop
                                 self.op.mac_prefix)
611 a8083063 Iustin Pop
612 a8083063 Iustin Pop
    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
613 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
614 3ecf6786 Iustin Pop
                                 self.op.hypervisor_type)
615 a8083063 Iustin Pop
616 880478f8 Iustin Pop
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
617 880478f8 Iustin Pop
    if result.failed:
618 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
619 8925faaa Iustin Pop
                                 (self.op.master_netdev,
620 8925faaa Iustin Pop
                                  result.output.strip()))
621 880478f8 Iustin Pop
622 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
623 a8083063 Iustin Pop
    """Initialize the cluster.
624 a8083063 Iustin Pop

625 a8083063 Iustin Pop
    """
626 a8083063 Iustin Pop
    clustername = self.clustername
627 a8083063 Iustin Pop
    hostname = self.hostname
628 a8083063 Iustin Pop
629 a8083063 Iustin Pop
    # set up the simple store
630 a8083063 Iustin Pop
    ss = ssconf.SimpleStore()
631 a8083063 Iustin Pop
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
632 bcf043c9 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
633 bcf043c9 Iustin Pop
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
634 880478f8 Iustin Pop
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
635 bcf043c9 Iustin Pop
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
636 a8083063 Iustin Pop
637 a8083063 Iustin Pop
    # set up the inter-node password and certificate
638 a8083063 Iustin Pop
    _InitGanetiServerSetup(ss)
639 a8083063 Iustin Pop
640 a8083063 Iustin Pop
    # start the master ip
641 bcf043c9 Iustin Pop
    rpc.call_node_start_master(hostname.name)
642 a8083063 Iustin Pop
643 a8083063 Iustin Pop
    # set up ssh config and /etc/hosts
644 a8083063 Iustin Pop
    f = open('/etc/ssh/ssh_host_rsa_key.pub', 'r')
645 a8083063 Iustin Pop
    try:
646 a8083063 Iustin Pop
      sshline = f.read()
647 a8083063 Iustin Pop
    finally:
648 a8083063 Iustin Pop
      f.close()
649 a8083063 Iustin Pop
    sshkey = sshline.split(" ")[1]
650 a8083063 Iustin Pop
651 bcf043c9 Iustin Pop
    _UpdateEtcHosts(hostname.name, hostname.ip)
652 a8083063 Iustin Pop
653 bcf043c9 Iustin Pop
    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)
654 a8083063 Iustin Pop
655 bcf043c9 Iustin Pop
    _InitSSHSetup(hostname.name)
656 a8083063 Iustin Pop
657 a8083063 Iustin Pop
    # init of cluster config file
658 a8083063 Iustin Pop
    cfgw = config.ConfigWriter()
659 bcf043c9 Iustin Pop
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
660 5fcdc80d Iustin Pop
                    sshkey, self.op.mac_prefix,
661 a8083063 Iustin Pop
                    self.op.vg_name, self.op.def_bridge)
662 a8083063 Iustin Pop
663 a8083063 Iustin Pop
664 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
665 a8083063 Iustin Pop
  """Logical unit for destroying the cluster.
666 a8083063 Iustin Pop

667 a8083063 Iustin Pop
  """
668 a8083063 Iustin Pop
  _OP_REQP = []
669 a8083063 Iustin Pop
670 a8083063 Iustin Pop
  def CheckPrereq(self):
671 a8083063 Iustin Pop
    """Check prerequisites.
672 a8083063 Iustin Pop

673 a8083063 Iustin Pop
    This checks whether the cluster is empty.
674 a8083063 Iustin Pop

675 a8083063 Iustin Pop
    Any errors are signalled by raising errors.OpPrereqError.
676 a8083063 Iustin Pop

677 a8083063 Iustin Pop
    """
678 880478f8 Iustin Pop
    master = self.sstore.GetMasterNode()
679 a8083063 Iustin Pop
680 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
681 db915bd1 Michael Hanselmann
    if len(nodelist) != 1 or nodelist[0] != master:
682 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d node(s) in"
683 3ecf6786 Iustin Pop
                                 " this cluster." % (len(nodelist) - 1))
684 db915bd1 Michael Hanselmann
    instancelist = self.cfg.GetInstanceList()
685 db915bd1 Michael Hanselmann
    if instancelist:
686 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("There are still %d instance(s) in"
687 3ecf6786 Iustin Pop
                                 " this cluster." % len(instancelist))
688 a8083063 Iustin Pop
689 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
690 a8083063 Iustin Pop
    """Destroys the cluster.
691 a8083063 Iustin Pop

692 a8083063 Iustin Pop
    """
693 a8083063 Iustin Pop
    utils.CreateBackup('/root/.ssh/id_dsa')
694 a8083063 Iustin Pop
    utils.CreateBackup('/root/.ssh/id_dsa.pub')
695 880478f8 Iustin Pop
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
696 a8083063 Iustin Pop
697 a8083063 Iustin Pop
698 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
699 a8083063 Iustin Pop
  """Verifies the cluster status.
700 a8083063 Iustin Pop

701 a8083063 Iustin Pop
  """
702 a8083063 Iustin Pop
  _OP_REQP = []
703 a8083063 Iustin Pop
704 a8083063 Iustin Pop
  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
705 a8083063 Iustin Pop
                  remote_version, feedback_fn):
706 a8083063 Iustin Pop
    """Run multiple tests against a node.
707 a8083063 Iustin Pop

708 a8083063 Iustin Pop
    Test list:
709 a8083063 Iustin Pop
      - compares ganeti version
710 a8083063 Iustin Pop
      - checks vg existence and size > 20G
711 a8083063 Iustin Pop
      - checks config file checksum
712 a8083063 Iustin Pop
      - checks ssh to other nodes
713 a8083063 Iustin Pop

714 a8083063 Iustin Pop
    Args:
715 a8083063 Iustin Pop
      node: name of the node to check
716 a8083063 Iustin Pop
      file_list: required list of files
717 a8083063 Iustin Pop
      local_cksum: dictionary of local files and their checksums
718 098c0958 Michael Hanselmann

719 a8083063 Iustin Pop
    """
720 a8083063 Iustin Pop
    # compares ganeti version
721 a8083063 Iustin Pop
    local_version = constants.PROTOCOL_VERSION
722 a8083063 Iustin Pop
    if not remote_version:
723 a8083063 Iustin Pop
      feedback_fn(" - ERROR: connection to %s failed" % (node))
724 a8083063 Iustin Pop
      return True
725 a8083063 Iustin Pop
726 a8083063 Iustin Pop
    if local_version != remote_version:
727 a8083063 Iustin Pop
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
728 a8083063 Iustin Pop
                      (local_version, node, remote_version))
729 a8083063 Iustin Pop
      return True
730 a8083063 Iustin Pop
731 a8083063 Iustin Pop
    # checks vg existence and size > 20G
732 a8083063 Iustin Pop
733 a8083063 Iustin Pop
    bad = False
734 a8083063 Iustin Pop
    if not vglist:
735 a8083063 Iustin Pop
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
736 a8083063 Iustin Pop
                      (node,))
737 a8083063 Iustin Pop
      bad = True
738 a8083063 Iustin Pop
    else:
739 a8083063 Iustin Pop
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
740 a8083063 Iustin Pop
      if vgstatus:
741 a8083063 Iustin Pop
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
742 a8083063 Iustin Pop
        bad = True
743 a8083063 Iustin Pop
744 a8083063 Iustin Pop
    # checks config file checksum
745 a8083063 Iustin Pop
    # checks ssh to any
746 a8083063 Iustin Pop
747 a8083063 Iustin Pop
    if 'filelist' not in node_result:
748 a8083063 Iustin Pop
      bad = True
749 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
750 a8083063 Iustin Pop
    else:
751 a8083063 Iustin Pop
      remote_cksum = node_result['filelist']
752 a8083063 Iustin Pop
      for file_name in file_list:
753 a8083063 Iustin Pop
        if file_name not in remote_cksum:
754 a8083063 Iustin Pop
          bad = True
755 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
756 a8083063 Iustin Pop
        elif remote_cksum[file_name] != local_cksum[file_name]:
757 a8083063 Iustin Pop
          bad = True
758 a8083063 Iustin Pop
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)
759 a8083063 Iustin Pop
760 a8083063 Iustin Pop
    if 'nodelist' not in node_result:
761 a8083063 Iustin Pop
      bad = True
762 a8083063 Iustin Pop
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
763 a8083063 Iustin Pop
    else:
764 a8083063 Iustin Pop
      if node_result['nodelist']:
765 a8083063 Iustin Pop
        bad = True
766 a8083063 Iustin Pop
        for node in node_result['nodelist']:
767 a8083063 Iustin Pop
          feedback_fn("  - ERROR: communication with node '%s': %s" %
768 a8083063 Iustin Pop
                          (node, node_result['nodelist'][node]))
769 a8083063 Iustin Pop
    hyp_result = node_result.get('hypervisor', None)
770 a8083063 Iustin Pop
    if hyp_result is not None:
771 a8083063 Iustin Pop
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
772 a8083063 Iustin Pop
    return bad
773 a8083063 Iustin Pop
774 a8083063 Iustin Pop
  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
775 a8083063 Iustin Pop
    """Verify an instance.
776 a8083063 Iustin Pop

777 a8083063 Iustin Pop
    This function checks to see if the required block devices are
778 a8083063 Iustin Pop
    available on the instance's node.
779 a8083063 Iustin Pop

780 a8083063 Iustin Pop
    """
781 a8083063 Iustin Pop
    bad = False
782 a8083063 Iustin Pop
783 a8083063 Iustin Pop
    instancelist = self.cfg.GetInstanceList()
784 a8083063 Iustin Pop
    if not instance in instancelist:
785 a8083063 Iustin Pop
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
786 a8083063 Iustin Pop
                      (instance, instancelist))
787 a8083063 Iustin Pop
      bad = True
788 a8083063 Iustin Pop
789 a8083063 Iustin Pop
    instanceconfig = self.cfg.GetInstanceInfo(instance)
790 a8083063 Iustin Pop
    node_current = instanceconfig.primary_node
791 a8083063 Iustin Pop
792 a8083063 Iustin Pop
    node_vol_should = {}
793 a8083063 Iustin Pop
    instanceconfig.MapLVsByNode(node_vol_should)
794 a8083063 Iustin Pop
795 a8083063 Iustin Pop
    for node in node_vol_should:
796 a8083063 Iustin Pop
      for volume in node_vol_should[node]:
797 a8083063 Iustin Pop
        if node not in node_vol_is or volume not in node_vol_is[node]:
798 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s missing on node %s" %
799 a8083063 Iustin Pop
                          (volume, node))
800 a8083063 Iustin Pop
          bad = True
801 a8083063 Iustin Pop
802 a8083063 Iustin Pop
    if instanceconfig.status != 'down':
803 a8083063 Iustin Pop
      if not instance in node_instance[node_current]:
804 a8083063 Iustin Pop
        feedback_fn("  - ERROR: instance %s not running on node %s" %
805 a8083063 Iustin Pop
                        (instance, node_current))
806 a8083063 Iustin Pop
        bad = True
807 a8083063 Iustin Pop
808 a8083063 Iustin Pop
    for node in node_instance:
809 a8083063 Iustin Pop
      if node != node_current:
810 a8083063 Iustin Pop
        if instance in node_instance[node]:
811 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
812 a8083063 Iustin Pop
                          (instance, node))
813 a8083063 Iustin Pop
          bad = True
814 a8083063 Iustin Pop
815 a8083063 Iustin Pop
    # like the other _Verify* helpers, return True when problems were found
    return bad
816 a8083063 Iustin Pop
817 a8083063 Iustin Pop
  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
818 a8083063 Iustin Pop
    """Verify if there are any unknown volumes in the cluster.
819 a8083063 Iustin Pop

820 a8083063 Iustin Pop
    The .os, .swap and backup volumes are ignored. All other volumes are
821 a8083063 Iustin Pop
    reported as unknown.
822 a8083063 Iustin Pop

823 a8083063 Iustin Pop
    """
824 a8083063 Iustin Pop
    bad = False
825 a8083063 Iustin Pop
826 a8083063 Iustin Pop
    for node in node_vol_is:
827 a8083063 Iustin Pop
      for volume in node_vol_is[node]:
828 a8083063 Iustin Pop
        if node not in node_vol_should or volume not in node_vol_should[node]:
829 a8083063 Iustin Pop
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
830 a8083063 Iustin Pop
                      (volume, node))
831 a8083063 Iustin Pop
          bad = True
832 a8083063 Iustin Pop
    return bad
833 a8083063 Iustin Pop
834 a8083063 Iustin Pop
  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
835 a8083063 Iustin Pop
    """Verify the list of running instances.
836 a8083063 Iustin Pop

837 a8083063 Iustin Pop
    This checks what instances are running but unknown to the cluster.
838 a8083063 Iustin Pop

839 a8083063 Iustin Pop
    """
840 a8083063 Iustin Pop
    bad = False
841 a8083063 Iustin Pop
    for node in node_instance:
842 a8083063 Iustin Pop
      for runninginstance in node_instance[node]:
843 a8083063 Iustin Pop
        if runninginstance not in instancelist:
844 a8083063 Iustin Pop
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
845 a8083063 Iustin Pop
                          (runninginstance, node))
846 a8083063 Iustin Pop
          bad = True
847 a8083063 Iustin Pop
    return bad
848 a8083063 Iustin Pop
849 a8083063 Iustin Pop
  def CheckPrereq(self):
850 a8083063 Iustin Pop
    """Check prerequisites.
851 a8083063 Iustin Pop

852 a8083063 Iustin Pop
    This has no prerequisites.
853 a8083063 Iustin Pop

854 a8083063 Iustin Pop
    """
855 a8083063 Iustin Pop
    pass
856 a8083063 Iustin Pop
857 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
858 a8083063 Iustin Pop
    """Verify integrity of cluster, performing various test on nodes.
859 a8083063 Iustin Pop

860 a8083063 Iustin Pop
    """
861 a8083063 Iustin Pop
    bad = False
862 a8083063 Iustin Pop
    feedback_fn("* Verifying global settings")
863 a8083063 Iustin Pop
    self.cfg.VerifyConfig()
864 a8083063 Iustin Pop
865 880478f8 Iustin Pop
    master = self.sstore.GetMasterNode()
866 a8083063 Iustin Pop
    vg_name = self.cfg.GetVGName()
867 a8083063 Iustin Pop
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
868 a8083063 Iustin Pop
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
869 a8083063 Iustin Pop
    node_volume = {}
870 a8083063 Iustin Pop
    node_instance = {}
871 a8083063 Iustin Pop
872 a8083063 Iustin Pop
    # FIXME: verify OS list
873 a8083063 Iustin Pop
    # do local checksums
874 cb91d46e Iustin Pop
    file_names = list(self.sstore.GetFileList())
875 cb91d46e Iustin Pop
    file_names.append(constants.SSL_CERT_FILE)
876 cb91d46e Iustin Pop
    file_names.append(constants.CLUSTER_CONF_FILE)
877 a8083063 Iustin Pop
    local_checksums = utils.FingerprintFiles(file_names)
878 a8083063 Iustin Pop
879 a8083063 Iustin Pop
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
880 a8083063 Iustin Pop
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
881 a8083063 Iustin Pop
    all_instanceinfo = rpc.call_instance_list(nodelist)
882 a8083063 Iustin Pop
    all_vglist = rpc.call_vg_list(nodelist)
883 a8083063 Iustin Pop
    node_verify_param = {
884 a8083063 Iustin Pop
      'filelist': file_names,
885 a8083063 Iustin Pop
      'nodelist': nodelist,
886 a8083063 Iustin Pop
      'hypervisor': None,
887 a8083063 Iustin Pop
      }
888 a8083063 Iustin Pop
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
889 a8083063 Iustin Pop
    all_rversion = rpc.call_version(nodelist)
890 a8083063 Iustin Pop
891 a8083063 Iustin Pop
    for node in nodelist:
892 a8083063 Iustin Pop
      feedback_fn("* Verifying node %s" % node)
893 a8083063 Iustin Pop
      result = self._VerifyNode(node, file_names, local_checksums,
894 a8083063 Iustin Pop
                                all_vglist[node], all_nvinfo[node],
895 a8083063 Iustin Pop
                                all_rversion[node], feedback_fn)
896 a8083063 Iustin Pop
      bad = bad or result
897 a8083063 Iustin Pop
898 a8083063 Iustin Pop
      # node_volume
899 a8083063 Iustin Pop
      volumeinfo = all_volumeinfo[node]
900 a8083063 Iustin Pop
901 a8083063 Iustin Pop
      if type(volumeinfo) != dict:
902 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
903 a8083063 Iustin Pop
        bad = True
904 a8083063 Iustin Pop
        continue
905 a8083063 Iustin Pop
906 a8083063 Iustin Pop
      node_volume[node] = volumeinfo
907 a8083063 Iustin Pop
908 a8083063 Iustin Pop
      # node_instance
909 a8083063 Iustin Pop
      nodeinstance = all_instanceinfo[node]
910 a8083063 Iustin Pop
      if type(nodeinstance) != list:
911 a8083063 Iustin Pop
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
912 a8083063 Iustin Pop
        bad = True
913 a8083063 Iustin Pop
        continue
914 a8083063 Iustin Pop
915 a8083063 Iustin Pop
      node_instance[node] = nodeinstance
916 a8083063 Iustin Pop
917 a8083063 Iustin Pop
    node_vol_should = {}
918 a8083063 Iustin Pop
919 a8083063 Iustin Pop
    for instance in instancelist:
920 a8083063 Iustin Pop
      feedback_fn("* Verifying instance %s" % instance)
921 a8083063 Iustin Pop
      result =  self._VerifyInstance(instance, node_volume, node_instance,
922 a8083063 Iustin Pop
                                     feedback_fn)
923 a8083063 Iustin Pop
      bad = bad or result
924 a8083063 Iustin Pop
925 a8083063 Iustin Pop
      inst_config = self.cfg.GetInstanceInfo(instance)
926 a8083063 Iustin Pop
927 a8083063 Iustin Pop
      inst_config.MapLVsByNode(node_vol_should)
928 a8083063 Iustin Pop
929 a8083063 Iustin Pop
    feedback_fn("* Verifying orphan volumes")
930 a8083063 Iustin Pop
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
931 a8083063 Iustin Pop
                                       feedback_fn)
932 a8083063 Iustin Pop
    bad = bad or result
933 a8083063 Iustin Pop
934 a8083063 Iustin Pop
    feedback_fn("* Verifying remaining instances")
935 a8083063 Iustin Pop
    result = self._VerifyOrphanInstances(instancelist, node_instance,
936 a8083063 Iustin Pop
                                         feedback_fn)
937 a8083063 Iustin Pop
    bad = bad or result
938 a8083063 Iustin Pop
939 a8083063 Iustin Pop
    return int(bad)
940 a8083063 Iustin Pop
941 a8083063 Iustin Pop
942 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
943 07bd8a51 Iustin Pop
  """Rename the cluster.
944 07bd8a51 Iustin Pop

945 07bd8a51 Iustin Pop
  """
946 07bd8a51 Iustin Pop
  HPATH = "cluster-rename"
947 07bd8a51 Iustin Pop
  HTYPE = constants.HTYPE_CLUSTER
948 07bd8a51 Iustin Pop
  _OP_REQP = ["name"]
949 07bd8a51 Iustin Pop
950 07bd8a51 Iustin Pop
  def BuildHooksEnv(self):
951 07bd8a51 Iustin Pop
    """Build hooks env.
952 07bd8a51 Iustin Pop

953 07bd8a51 Iustin Pop
    """
954 07bd8a51 Iustin Pop
    env = {
955 07bd8a51 Iustin Pop
      "NEW_NAME": self.op.name,
956 07bd8a51 Iustin Pop
      }
957 07bd8a51 Iustin Pop
    mn = self.sstore.GetMasterNode()
958 07bd8a51 Iustin Pop
    return env, [mn], [mn]
959 07bd8a51 Iustin Pop
960 07bd8a51 Iustin Pop
  def CheckPrereq(self):
961 07bd8a51 Iustin Pop
    """Verify that the passed name is a valid one.
962 07bd8a51 Iustin Pop

963 07bd8a51 Iustin Pop
    """
964 07bd8a51 Iustin Pop
    hostname = utils.LookupHostname(self.op.name)
965 07bd8a51 Iustin Pop
    if not hostname:
966 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Cannot resolve the new cluster name ('%s')" %
967 07bd8a51 Iustin Pop
                                 self.op.name)
968 07bd8a51 Iustin Pop
969 bcf043c9 Iustin Pop
    new_name = hostname.name
970 bcf043c9 Iustin Pop
    self.ip = new_ip = hostname.ip
971 07bd8a51 Iustin Pop
    old_name = self.sstore.GetClusterName()
972 07bd8a51 Iustin Pop
    old_ip = self.sstore.GetMasterIP()
973 07bd8a51 Iustin Pop
    if new_name == old_name and new_ip == old_ip:
974 07bd8a51 Iustin Pop
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
975 07bd8a51 Iustin Pop
                                 " cluster has changed")
976 07bd8a51 Iustin Pop
    if new_ip != old_ip:
977 07bd8a51 Iustin Pop
      result = utils.RunCmd(["fping", "-q", new_ip])
978 07bd8a51 Iustin Pop
      if not result.failed:
979 07bd8a51 Iustin Pop
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
980 07bd8a51 Iustin Pop
                                   " reachable on the network. Aborting." %
981 07bd8a51 Iustin Pop
                                   new_ip)
982 07bd8a51 Iustin Pop
983 07bd8a51 Iustin Pop
    self.op.name = new_name
984 07bd8a51 Iustin Pop
985 07bd8a51 Iustin Pop
  def Exec(self, feedback_fn):
986 07bd8a51 Iustin Pop
    """Rename the cluster.
987 07bd8a51 Iustin Pop

988 07bd8a51 Iustin Pop
    """
989 07bd8a51 Iustin Pop
    clustername = self.op.name
990 07bd8a51 Iustin Pop
    ip = self.ip
991 07bd8a51 Iustin Pop
    ss = self.sstore
992 07bd8a51 Iustin Pop
993 07bd8a51 Iustin Pop
    # shutdown the master IP
994 07bd8a51 Iustin Pop
    master = ss.GetMasterNode()
995 07bd8a51 Iustin Pop
    if not rpc.call_node_stop_master(master):
996 07bd8a51 Iustin Pop
      raise errors.OpExecError("Could not disable the master role")
997 07bd8a51 Iustin Pop
998 07bd8a51 Iustin Pop
    try:
999 07bd8a51 Iustin Pop
      # modify the sstore
1000 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_MASTER_IP, ip)
1001 07bd8a51 Iustin Pop
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)
1002 07bd8a51 Iustin Pop
1003 07bd8a51 Iustin Pop
      # Distribute updated ss config to all nodes
1004 07bd8a51 Iustin Pop
      myself = self.cfg.GetNodeInfo(master)
1005 07bd8a51 Iustin Pop
      dist_nodes = self.cfg.GetNodeList()
1006 07bd8a51 Iustin Pop
      if myself.name in dist_nodes:
1007 07bd8a51 Iustin Pop
        dist_nodes.remove(myself.name)
1008 07bd8a51 Iustin Pop
1009 07bd8a51 Iustin Pop
      logger.Debug("Copying updated ssconf data to all nodes")
1010 07bd8a51 Iustin Pop
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
1011 07bd8a51 Iustin Pop
        fname = ss.KeyToFilename(keyname)
1012 07bd8a51 Iustin Pop
        result = rpc.call_upload_file(dist_nodes, fname)
1013 07bd8a51 Iustin Pop
        for to_node in dist_nodes:
1014 07bd8a51 Iustin Pop
          if not result[to_node]:
1015 07bd8a51 Iustin Pop
            logger.Error("copy of file %s to node %s failed" %
1016 07bd8a51 Iustin Pop
                         (fname, to_node))
1017 07bd8a51 Iustin Pop
    finally:
1018 07bd8a51 Iustin Pop
      if not rpc.call_node_start_master(master):
1019 07bd8a51 Iustin Pop
        logger.Error("Could not re-enable the master role on the master,\n"
1020 07bd8a51 Iustin Pop
                     "please restart manually.")
1021 07bd8a51 Iustin Pop
1022 07bd8a51 Iustin Pop
1023 a8083063 Iustin Pop
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  """
  if not instance.disks:
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      perc_done, est_time, is_degraded = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          max_time = est_time
        else:
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded


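# Companion check to _WaitForSync: it walks one disk and its children and
# returns False as soon as a node reports the device as degraded (rstats[5]
# in the blockdev_find reply) or cannot be queried at all.  A hypothetical
# call, assuming `cfg`, `dev` and `pnode` hold a ConfigWriter, a disk object
# and the primary node name:
#
#   if not _CheckDiskConsistency(cfg, dev, pnode, True):
#     raise errors.OpExecError("disk %s is degraded" % dev.iv_name)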
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
  """Check that mirrors are not degraded.

  """
  cfgw.SetDiskID(dev, node)

  result = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      result = False
    else:
      result = result and (not rstats[5])
  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return result


class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    if node_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data


class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)


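# The node query below distinguishes two kinds of output fields: static ones
# ("name", "pip", "sip" and the instance counts/lists), which are answered
# from the configuration alone, and dynamic ones ("mtotal", "mnode", "mfree",
# "dtotal", "dfree"), which need a live rpc.call_node_info round-trip and are
# therefore only gathered when at least one of them was requested.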
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if nodeinfo:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            }
        else:
          live_data[name] = {}
    else:
      live_data = dict.fromkeys(nodenames, {})

    node_to_primary = dict([(name, set()) for name in nodenames])
    node_to_secondary = dict([(name, set()) for name in nodenames])

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      instancelist = self.cfg.GetInstanceList()

      for instance_name in instancelist:
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      node_output = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        node_output.append(val)
      output.append(node_output)

    return output


class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    ilist = [self.cfg.GetInstanceInfo(iname) for iname
             in self.cfg.GetInstanceList()]

    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in ilist])

    output = []
    for node in nodenames:
      if node not in volumes or not volumes[node]:
        continue

      node_vols = volumes[node][:]
      node_vols.sort(key=lambda vol: vol['dev'])

      for vol in node_vols:
        node_output = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            for inst in ilist:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          node_output.append(str(val))

        output.append(node_output)

    return output


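# Adding a node is a multi-step procedure, all of it visible in Exec() below:
# push the node daemon password and SSL certificate over ssh and restart
# ganeti-noded, verify the protocol version, copy the ssh host and root keys,
# update /etc/hosts and known_hosts everywhere, distribute the ssconf files
# and finally register the new node in the configuration.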
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) match the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.LookupHostname(node_name)
    if not dns_data:
      raise errors.OpPrereqError("Node %s is not resolvable" % node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # check reachability
    command = ["fping", "-q", primary_ip]
    result = utils.RunCmd(command)
    if result.failed:
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      command = ["fping", "-S%s" % myself.secondary_ip, "-q", secondary_ip]
      result = utils.RunCmd(command)
      if result.failed:
        raise errors.OpPrereqError("Node secondary ip not reachable by ping")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # set up the inter-node password and certificate, and restart the node
    # daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    keyarray = []
    keyfiles = ["/etc/ssh/ssh_host_dsa_key", "/etc/ssh/ssh_host_dsa_key.pub",
                "/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key.pub",
                "/root/.ssh/id_dsa", "/root/.ssh/id_dsa.pub"]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      result = ssh.SSHCall(node, "root",
                           "fping -S 127.0.0.1 -q %s" % new_node.secondary_ip)
      if result.failed:
        raise errors.OpExecError("Node claims it doesn't have the"
                                 " secondary ip you gave (%s).\n"
                                 "Please fix and re-run this command." %
                                 new_node.secondary_ip)

    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s.\n"
                               "Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)


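# Master failover is a three-step sequence: deactivate the master role (and
# master IP) on the old master, rewrite and redistribute the simple-store
# master file, then start the master role on the node this command runs on.
# Failures along the way are only logged, since a partially completed
# failover has to be finished by hand anyway.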
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    self.new_master = socket.gethostname()

    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This command must be run on the node"
                                 " where you want the new master to be.\n"
                                 "%s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could not disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")


class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    result = {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      }

    return result


class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy the file given in the opcode to the selected nodes.

    The file (self.op.filename) is copied from the master to every node
    in self.nodes (all nodes if the opcode's node list was empty),
    skipping the master itself.

    """
    filename = self.op.filename

    myname = socket.gethostname()

    for node in self.nodes:
      if node == myname:
        continue
      if not ssh.CopyFileToNode(node, filename):
        logger.Error("Copy of file %s to node %s failed" % (filename, node))


class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Dump a representation of the cluster config to the standard output.

    """
    return self.cfg.DumpConfig()


class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run a command on some nodes.

    """
    data = []
    for node in self.nodes:
      result = ssh.SSHCall(node, "root", self.op.command)
      data.append((node, result.output, result.exit_code))

    return data


class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info


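# _AssembleInstanceDisks is the low-level worker behind the LU above: it
# returns a (status, device_info) pair instead of raising, so that callers
# can decide how strict to be.  A minimal, hypothetical call with `inst` and
# `cfg` in scope:
#
#   disks_ok, dev_info = _AssembleInstanceDisks(inst, cfg)
#   if not disks_ok:
#     raise errors.OpExecError("Cannot activate block devices")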
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a (disks_ok, device_info) tuple; disks_ok is false if the operation
    failed, and device_info is a list of (host, instance_visible_name,
    node_visible_name) triples mapping node devices to instance devices
  """
  device_info = []
  disks_ok = True
  for inst_disk in instance.disks:
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s"
                     " (is_primary=%s)" %
                     (inst_disk.iv_name, node, is_primary))
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  return disks_ok, device_info


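# Convenience wrapper around _AssembleInstanceDisks that raises OpExecError
# (after tearing the disks back down) instead of returning a status; it is
# used for example by LUStartupInstance further down.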
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
                                           ignore_secondaries=force)
  if not disks_ok:
    _ShutdownInstanceDisks(instance, cfg)
    if force is not None and not force:
      logger.Error("If the message above refers to a secondary node,"
                   " you can retry the operation using '--force'.")
    raise errors.OpExecError("Disk consistency error")


class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    if not type(ins_l) is list:
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)


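# Counterpart of _AssembleInstanceDisks: shuts the block devices down on all
# nodes of the instance.  With ignore_primary=True a failure on the primary
# node does not mark the overall result as failed, which is useful when the
# primary node may be unreachable.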
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
1892 a8083063 Iustin Pop
  """Shutdown block devices of an instance.
1893 a8083063 Iustin Pop

1894 a8083063 Iustin Pop
  This does the shutdown on all nodes of the instance.
1895 a8083063 Iustin Pop

1896 a8083063 Iustin Pop
  If the ignore_primary is false, errors on the primary node are
1897 a8083063 Iustin Pop
  ignored.
1898 a8083063 Iustin Pop

1899 a8083063 Iustin Pop
  """
1900 a8083063 Iustin Pop
  result = True
1901 a8083063 Iustin Pop
  for disk in instance.disks:
1902 a8083063 Iustin Pop
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
1903 a8083063 Iustin Pop
      cfg.SetDiskID(top_disk, node)
1904 a8083063 Iustin Pop
      if not rpc.call_blockdev_shutdown(node, top_disk):
1905 a8083063 Iustin Pop
        logger.Error("could not shutdown block device %s on node %s" %
1906 a8083063 Iustin Pop
                     (disk.iv_name, node))
1907 a8083063 Iustin Pop
        if not ignore_primary or node != instance.primary_node:
1908 a8083063 Iustin Pop
          result = False
1909 a8083063 Iustin Pop
  return result
1910 a8083063 Iustin Pop
1911 a8083063 Iustin Pop
1912 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
1913 a8083063 Iustin Pop
  """Starts an instance.
1914 a8083063 Iustin Pop

1915 a8083063 Iustin Pop
  """
1916 a8083063 Iustin Pop
  HPATH = "instance-start"
1917 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1918 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "force"]
1919 a8083063 Iustin Pop
1920 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1921 a8083063 Iustin Pop
    """Build hooks env.
1922 a8083063 Iustin Pop

1923 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
1924 a8083063 Iustin Pop

1925 a8083063 Iustin Pop
    """
1926 a8083063 Iustin Pop
    env = {
1927 a8083063 Iustin Pop
      "FORCE": self.op.force,
1928 a8083063 Iustin Pop
      }
1929 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
1930 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
1931 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
1932 a8083063 Iustin Pop
    return env, nl, nl
1933 a8083063 Iustin Pop
1934 a8083063 Iustin Pop
  def CheckPrereq(self):
1935 a8083063 Iustin Pop
    """Check prerequisites.
1936 a8083063 Iustin Pop

1937 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
1938 a8083063 Iustin Pop

1939 a8083063 Iustin Pop
    """
1940 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
1941 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
1942 a8083063 Iustin Pop
    if instance is None:
1943 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
1944 3ecf6786 Iustin Pop
                                 self.op.instance_name)
1945 a8083063 Iustin Pop
1946 a8083063 Iustin Pop
    # check bridges existance
1947 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
1948 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
1949 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("one or more target bridges %s does not"
1950 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
1951 3ecf6786 Iustin Pop
                                 (brlist, instance.primary_node))
1952 a8083063 Iustin Pop
1953 a8083063 Iustin Pop
    self.instance = instance
1954 a8083063 Iustin Pop
    self.op.instance_name = instance.name
1955 a8083063 Iustin Pop
1956 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
1957 a8083063 Iustin Pop
    """Start the instance.
1958 a8083063 Iustin Pop

1959 a8083063 Iustin Pop
    """
1960 a8083063 Iustin Pop
    instance = self.instance
1961 a8083063 Iustin Pop
    force = self.op.force
1962 a8083063 Iustin Pop
    extra_args = getattr(self.op, "extra_args", "")
1963 a8083063 Iustin Pop
1964 a8083063 Iustin Pop
    node_current = instance.primary_node
1965 a8083063 Iustin Pop
1966 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
1967 a8083063 Iustin Pop
    if not nodeinfo:
1968 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact node %s for infos" %
1969 3ecf6786 Iustin Pop
                               (node_current))
1970 a8083063 Iustin Pop
1971 a8083063 Iustin Pop
    freememory = nodeinfo[node_current]['memory_free']
1972 a8083063 Iustin Pop
    memory = instance.memory
1973 a8083063 Iustin Pop
    if memory > freememory:
1974 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to start instance"
1975 3ecf6786 Iustin Pop
                               " %s on node %s"
1976 3ecf6786 Iustin Pop
                               " needed %s MiB, available %s MiB" %
1977 3ecf6786 Iustin Pop
                               (instance.name, node_current, memory,
1978 3ecf6786 Iustin Pop
                                freememory))
1979 a8083063 Iustin Pop
1980 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, instance, force)
1981 a8083063 Iustin Pop
1982 a8083063 Iustin Pop
    if not rpc.call_instance_start(node_current, instance, extra_args):
1983 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
1984 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not start instance")
1985 a8083063 Iustin Pop
1986 a8083063 Iustin Pop
    self.cfg.MarkInstanceUp(instance.name)
1987 a8083063 Iustin Pop
1988 a8083063 Iustin Pop
1989 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
1990 a8083063 Iustin Pop
  """Shutdown an instance.
1991 a8083063 Iustin Pop

1992 a8083063 Iustin Pop
  """
1993 a8083063 Iustin Pop
  HPATH = "instance-stop"
1994 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
1995 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
1996 a8083063 Iustin Pop
1997 a8083063 Iustin Pop
  def BuildHooksEnv(self):
1998 a8083063 Iustin Pop
    """Build hooks env.
1999 a8083063 Iustin Pop

2000 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2001 a8083063 Iustin Pop

2002 a8083063 Iustin Pop
    """
2003 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2004 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2005 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2006 a8083063 Iustin Pop
    return env, nl, nl
2007 a8083063 Iustin Pop
2008 a8083063 Iustin Pop
  def CheckPrereq(self):
2009 a8083063 Iustin Pop
    """Check prerequisites.
2010 a8083063 Iustin Pop

2011 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2012 a8083063 Iustin Pop

2013 a8083063 Iustin Pop
    """
2014 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2015 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2016 a8083063 Iustin Pop
    if instance is None:
2017 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2018 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2019 a8083063 Iustin Pop
    self.instance = instance
2020 a8083063 Iustin Pop
2021 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2022 a8083063 Iustin Pop
    """Shutdown the instance.
2023 a8083063 Iustin Pop

2024 a8083063 Iustin Pop
    """
2025 a8083063 Iustin Pop
    instance = self.instance
2026 a8083063 Iustin Pop
    node_current = instance.primary_node
2027 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(node_current, instance):
2028 a8083063 Iustin Pop
      logger.Error("could not shutdown instance")
2029 a8083063 Iustin Pop
2030 a8083063 Iustin Pop
    self.cfg.MarkInstanceDown(instance.name)
2031 a8083063 Iustin Pop
    _ShutdownInstanceDisks(instance, self.cfg)
2032 a8083063 Iustin Pop
2033 a8083063 Iustin Pop
2034 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
2035 fe7b0351 Michael Hanselmann
  """Reinstall an instance.
2036 fe7b0351 Michael Hanselmann

2037 fe7b0351 Michael Hanselmann
  """
2038 fe7b0351 Michael Hanselmann
  HPATH = "instance-reinstall"
2039 fe7b0351 Michael Hanselmann
  HTYPE = constants.HTYPE_INSTANCE
2040 fe7b0351 Michael Hanselmann
  _OP_REQP = ["instance_name"]
2041 fe7b0351 Michael Hanselmann
2042 fe7b0351 Michael Hanselmann
  def BuildHooksEnv(self):
2043 fe7b0351 Michael Hanselmann
    """Build hooks env.
2044 fe7b0351 Michael Hanselmann

2045 fe7b0351 Michael Hanselmann
    This runs on master, primary and secondary nodes of the instance.
2046 fe7b0351 Michael Hanselmann

2047 fe7b0351 Michael Hanselmann
    """
2048 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2049 fe7b0351 Michael Hanselmann
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2050 fe7b0351 Michael Hanselmann
          list(self.instance.secondary_nodes))
2051 fe7b0351 Michael Hanselmann
    return env, nl, nl
2052 fe7b0351 Michael Hanselmann
2053 fe7b0351 Michael Hanselmann
  def CheckPrereq(self):
2054 fe7b0351 Michael Hanselmann
    """Check prerequisites.
2055 fe7b0351 Michael Hanselmann

2056 fe7b0351 Michael Hanselmann
    This checks that the instance is in the cluster and is not running.
2057 fe7b0351 Michael Hanselmann

2058 fe7b0351 Michael Hanselmann
    """
2059 fe7b0351 Michael Hanselmann
    instance = self.cfg.GetInstanceInfo(
2060 fe7b0351 Michael Hanselmann
      self.cfg.ExpandInstanceName(self.op.instance_name))
2061 fe7b0351 Michael Hanselmann
    if instance is None:
2062 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2063 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2064 fe7b0351 Michael Hanselmann
    if instance.disk_template == constants.DT_DISKLESS:
2065 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' has no disks" %
2066 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2067 fe7b0351 Michael Hanselmann
    if instance.status != "down":
2068 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2069 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2070 fe7b0351 Michael Hanselmann
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2071 fe7b0351 Michael Hanselmann
    if remote_info:
2072 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2073 3ecf6786 Iustin Pop
                                 (self.op.instance_name,
2074 3ecf6786 Iustin Pop
                                  instance.primary_node))
2075 d0834de3 Michael Hanselmann
2076 d0834de3 Michael Hanselmann
    self.op.os_type = getattr(self.op, "os_type", None)
2077 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2078 d0834de3 Michael Hanselmann
      # OS verification
2079 d0834de3 Michael Hanselmann
      pnode = self.cfg.GetNodeInfo(
2080 d0834de3 Michael Hanselmann
        self.cfg.ExpandNodeName(instance.primary_node))
2081 d0834de3 Michael Hanselmann
      if pnode is None:
2082 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
2083 3ecf6786 Iustin Pop
                                   instance.primary_node)
2084 d0834de3 Michael Hanselmann
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2085 d0834de3 Michael Hanselmann
      if not isinstance(os_obj, objects.OS):
2086 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
2087 3ecf6786 Iustin Pop
                                   " primary node"  % self.op.os_type)
2088 d0834de3 Michael Hanselmann
2089 fe7b0351 Michael Hanselmann
    self.instance = instance
2090 fe7b0351 Michael Hanselmann
2091 fe7b0351 Michael Hanselmann
  def Exec(self, feedback_fn):
2092 fe7b0351 Michael Hanselmann
    """Reinstall the instance.
2093 fe7b0351 Michael Hanselmann

2094 fe7b0351 Michael Hanselmann
    """
2095 fe7b0351 Michael Hanselmann
    inst = self.instance
2096 fe7b0351 Michael Hanselmann
2097 d0834de3 Michael Hanselmann
    if self.op.os_type is not None:
2098 d0834de3 Michael Hanselmann
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
2099 d0834de3 Michael Hanselmann
      inst.os = self.op.os_type
2100 d0834de3 Michael Hanselmann
      self.cfg.AddInstance(inst)
2101 d0834de3 Michael Hanselmann
2102 fe7b0351 Michael Hanselmann
    _StartInstanceDisks(self.cfg, inst, None)
2103 fe7b0351 Michael Hanselmann
    try:
2104 fe7b0351 Michael Hanselmann
      feedback_fn("Running the instance OS create scripts...")
2105 fe7b0351 Michael Hanselmann
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
2106 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not install OS for instance %s "
2107 3ecf6786 Iustin Pop
                                 "on node %s" %
2108 3ecf6786 Iustin Pop
                                 (inst.name, inst.primary_node))
2109 fe7b0351 Michael Hanselmann
    finally:
2110 fe7b0351 Michael Hanselmann
      _ShutdownInstanceDisks(inst, self.cfg)
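      # running this in the finally clause guarantees the disks are
      # deactivated again even if the OS create script failed above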
2111 fe7b0351 Michael Hanselmann
2112 fe7b0351 Michael Hanselmann
2113 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
2114 decd5f45 Iustin Pop
  """Rename an instance.
2115 decd5f45 Iustin Pop

2116 decd5f45 Iustin Pop
  """
2117 decd5f45 Iustin Pop
  HPATH = "instance-rename"
2118 decd5f45 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2119 decd5f45 Iustin Pop
  _OP_REQP = ["instance_name", "new_name"]
2120 decd5f45 Iustin Pop
2121 decd5f45 Iustin Pop
  def BuildHooksEnv(self):
2122 decd5f45 Iustin Pop
    """Build hooks env.
2123 decd5f45 Iustin Pop

2124 decd5f45 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2125 decd5f45 Iustin Pop

2126 decd5f45 Iustin Pop
    """
2127 decd5f45 Iustin Pop
    env = _BuildInstanceHookEnvByObject(self.instance)
2128 decd5f45 Iustin Pop
    env["INSTANCE_NEW_NAME"] = self.op.new_name
2129 decd5f45 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2130 decd5f45 Iustin Pop
          list(self.instance.secondary_nodes))
2131 decd5f45 Iustin Pop
    return env, nl, nl
2132 decd5f45 Iustin Pop
2133 decd5f45 Iustin Pop
  def CheckPrereq(self):
2134 decd5f45 Iustin Pop
    """Check prerequisites.
2135 decd5f45 Iustin Pop

2136 decd5f45 Iustin Pop
    This checks that the instance is in the cluster and is not running.
2137 decd5f45 Iustin Pop

2138 decd5f45 Iustin Pop
    """
2139 decd5f45 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2140 decd5f45 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2141 decd5f45 Iustin Pop
    if instance is None:
2142 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2143 decd5f45 Iustin Pop
                                 self.op.instance_name)
2144 decd5f45 Iustin Pop
    if instance.status != "down":
2145 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
2146 decd5f45 Iustin Pop
                                 self.op.instance_name)
2147 decd5f45 Iustin Pop
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
2148 decd5f45 Iustin Pop
    if remote_info:
2149 decd5f45 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
2150 decd5f45 Iustin Pop
                                 (self.op.instance_name,
2151 decd5f45 Iustin Pop
                                  instance.primary_node))
2152 decd5f45 Iustin Pop
    self.instance = instance
2153 decd5f45 Iustin Pop
2154 decd5f45 Iustin Pop
    # new name verification
2155 decd5f45 Iustin Pop
    hostname1 = utils.LookupHostname(self.op.new_name)
2156 decd5f45 Iustin Pop
    if not hostname1:
2157 decd5f45 Iustin Pop
      raise errors.OpPrereqError("New instance name '%s' not found in dns" %
2158 decd5f45 Iustin Pop
                                 self.op.new_name)
2159 decd5f45 Iustin Pop
2160 bcf043c9 Iustin Pop
    self.op.new_name = new_name = hostname1.name
2161 decd5f45 Iustin Pop
    if not getattr(self.op, "ignore_ip", False):
2162 bcf043c9 Iustin Pop
      command = ["fping", "-q", hostname1.ip]
2163 decd5f45 Iustin Pop
      result = utils.RunCmd(command)
2164 decd5f45 Iustin Pop
      if not result.failed:
2165 decd5f45 Iustin Pop
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
2166 bcf043c9 Iustin Pop
                                   (hostname1.ip, new_name))
2167 decd5f45 Iustin Pop
2168 decd5f45 Iustin Pop
2169 decd5f45 Iustin Pop
  def Exec(self, feedback_fn):
2170 decd5f45 Iustin Pop
    """Reinstall the instance.
2171 decd5f45 Iustin Pop

2172 decd5f45 Iustin Pop
    """
2173 decd5f45 Iustin Pop
    inst = self.instance
2174 decd5f45 Iustin Pop
    old_name = inst.name
2175 decd5f45 Iustin Pop
2176 decd5f45 Iustin Pop
    self.cfg.RenameInstance(inst.name, self.op.new_name)
2177 decd5f45 Iustin Pop
2178 decd5f45 Iustin Pop
    # re-read the instance from the configuration after rename
2179 decd5f45 Iustin Pop
    inst = self.cfg.GetInstanceInfo(self.op.new_name)
2180 decd5f45 Iustin Pop
2181 decd5f45 Iustin Pop
    _StartInstanceDisks(self.cfg, inst, None)
2182 decd5f45 Iustin Pop
    try:
2183 decd5f45 Iustin Pop
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
2184 decd5f45 Iustin Pop
                                          "sda", "sdb"):
2185 decd5f45 Iustin Pop
        msg = ("Could run OS rename script for instance %s\n"
2186 decd5f45 Iustin Pop
               "on node %s\n"
2187 decd5f45 Iustin Pop
               "(but the instance has been renamed in Ganeti)" %
2188 decd5f45 Iustin Pop
               (inst.name, inst.primary_node))
2189 decd5f45 Iustin Pop
        logger.Error(msg)
2190 decd5f45 Iustin Pop
    finally:
2191 decd5f45 Iustin Pop
      _ShutdownInstanceDisks(inst, self.cfg)
2192 decd5f45 Iustin Pop
2193 decd5f45 Iustin Pop
2194 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
2195 a8083063 Iustin Pop
  """Remove an instance.
2196 a8083063 Iustin Pop

2197 a8083063 Iustin Pop
  """
2198 a8083063 Iustin Pop
  HPATH = "instance-remove"
2199 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2200 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2201 a8083063 Iustin Pop
2202 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2203 a8083063 Iustin Pop
    """Build hooks env.
2204 a8083063 Iustin Pop

2205 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2206 a8083063 Iustin Pop

2207 a8083063 Iustin Pop
    """
2208 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance)
2209 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
2210 a8083063 Iustin Pop
          list(self.instance.secondary_nodes))
2211 a8083063 Iustin Pop
    return env, nl, nl
2212 a8083063 Iustin Pop
2213 a8083063 Iustin Pop
  def CheckPrereq(self):
2214 a8083063 Iustin Pop
    """Check prerequisites.
2215 a8083063 Iustin Pop

2216 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2217 a8083063 Iustin Pop

2218 a8083063 Iustin Pop
    """
2219 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2220 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2221 a8083063 Iustin Pop
    if instance is None:
2222 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2223 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2224 a8083063 Iustin Pop
    self.instance = instance
2225 a8083063 Iustin Pop
2226 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2227 a8083063 Iustin Pop
    """Remove the instance.
2228 a8083063 Iustin Pop

2229 a8083063 Iustin Pop
    """
2230 a8083063 Iustin Pop
    instance = self.instance
2231 a8083063 Iustin Pop
    logger.Info("shutting down instance %s on node %s" %
2232 a8083063 Iustin Pop
                (instance.name, instance.primary_node))
2233 a8083063 Iustin Pop
2234 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(instance.primary_node, instance):
2235 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not shutdown instance %s on node %s" %
2236 3ecf6786 Iustin Pop
                               (instance.name, instance.primary_node))
2237 a8083063 Iustin Pop
2238 a8083063 Iustin Pop
    logger.Info("removing block devices for instance %s" % instance.name)
2239 a8083063 Iustin Pop
2240 a8083063 Iustin Pop
    _RemoveDisks(instance, self.cfg)
2241 a8083063 Iustin Pop
2242 a8083063 Iustin Pop
    logger.Info("removing instance %s out of cluster config" % instance.name)
2243 a8083063 Iustin Pop
2244 a8083063 Iustin Pop
    self.cfg.RemoveInstance(instance.name)
2245 a8083063 Iustin Pop
2246 a8083063 Iustin Pop
2247 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
2248 a8083063 Iustin Pop
  """Logical unit for querying instances.
2249 a8083063 Iustin Pop

2250 a8083063 Iustin Pop
  """
2251 069dcc86 Iustin Pop
  _OP_REQP = ["output_fields", "names"]
2252 a8083063 Iustin Pop
2253 a8083063 Iustin Pop
  def CheckPrereq(self):
2254 a8083063 Iustin Pop
    """Check prerequisites.
2255 a8083063 Iustin Pop

2256 a8083063 Iustin Pop
    This checks that the fields required are valid output fields.
2257 a8083063 Iustin Pop

2258 a8083063 Iustin Pop
    """
2259 a8083063 Iustin Pop
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
2260 dcb93971 Michael Hanselmann
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
2261 dcb93971 Michael Hanselmann
                               "admin_state", "admin_ram",
2262 644eeef9 Iustin Pop
                               "disk_template", "ip", "mac", "bridge",
2263 644eeef9 Iustin Pop
                               "sda_size", "sdb_size"],
2264 dcb93971 Michael Hanselmann
                       dynamic=self.dynamic_fields,
2265 dcb93971 Michael Hanselmann
                       selected=self.op.output_fields)
2266 a8083063 Iustin Pop
2267 069dcc86 Iustin Pop
    self.wanted = _GetWantedInstances(self, self.op.names)
2268 069dcc86 Iustin Pop
2269 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2270 a8083063 Iustin Pop
    """Computes the list of nodes and their attributes.
2271 a8083063 Iustin Pop

2272 a8083063 Iustin Pop
    """
2273 069dcc86 Iustin Pop
    instance_names = self.wanted
2274 a8083063 Iustin Pop
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
2275 a8083063 Iustin Pop
                     in instance_names]
2276 a8083063 Iustin Pop
2277 a8083063 Iustin Pop
    # begin data gathering
2278 a8083063 Iustin Pop
2279 a8083063 Iustin Pop
    nodes = frozenset([inst.primary_node for inst in instance_list])
2280 a8083063 Iustin Pop
2281 a8083063 Iustin Pop
    bad_nodes = []
2282 a8083063 Iustin Pop
    if self.dynamic_fields.intersection(self.op.output_fields):
2283 a8083063 Iustin Pop
      live_data = {}
2284 a8083063 Iustin Pop
      node_data = rpc.call_all_instances_info(nodes)
2285 a8083063 Iustin Pop
      for name in nodes:
2286 a8083063 Iustin Pop
        result = node_data[name]
2287 a8083063 Iustin Pop
        if result:
2288 a8083063 Iustin Pop
          live_data.update(result)
2289 a8083063 Iustin Pop
        elif result == False:
2290 a8083063 Iustin Pop
          bad_nodes.append(name)
2291 a8083063 Iustin Pop
        # else no instance is alive
2292 a8083063 Iustin Pop
    else:
2293 a8083063 Iustin Pop
      live_data = dict([(name, {}) for name in instance_names])
2294 a8083063 Iustin Pop
2295 a8083063 Iustin Pop
    # end data gathering
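    # illustrative example (hostnames made up): with output_fields of
    # ["name", "pnode", "oper_state"], each row appended to 'output' below
    # would look like ["inst1.example.com", "node1.example.com", True]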
2296 a8083063 Iustin Pop
2297 a8083063 Iustin Pop
    output = []
2298 a8083063 Iustin Pop
    for instance in instance_list:
2299 a8083063 Iustin Pop
      iout = []
2300 a8083063 Iustin Pop
      for field in self.op.output_fields:
2301 a8083063 Iustin Pop
        if field == "name":
2302 a8083063 Iustin Pop
          val = instance.name
2303 a8083063 Iustin Pop
        elif field == "os":
2304 a8083063 Iustin Pop
          val = instance.os
2305 a8083063 Iustin Pop
        elif field == "pnode":
2306 a8083063 Iustin Pop
          val = instance.primary_node
2307 a8083063 Iustin Pop
        elif field == "snodes":
2308 8a23d2d3 Iustin Pop
          val = list(instance.secondary_nodes)
2309 a8083063 Iustin Pop
        elif field == "admin_state":
2310 8a23d2d3 Iustin Pop
          val = (instance.status != "down")
2311 a8083063 Iustin Pop
        elif field == "oper_state":
2312 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2313 8a23d2d3 Iustin Pop
            val = None
2314 a8083063 Iustin Pop
          else:
2315 8a23d2d3 Iustin Pop
            val = bool(live_data.get(instance.name))
2316 a8083063 Iustin Pop
        elif field == "admin_ram":
2317 a8083063 Iustin Pop
          val = instance.memory
2318 a8083063 Iustin Pop
        elif field == "oper_ram":
2319 a8083063 Iustin Pop
          if instance.primary_node in bad_nodes:
2320 8a23d2d3 Iustin Pop
            val = None
2321 a8083063 Iustin Pop
          elif instance.name in live_data:
2322 a8083063 Iustin Pop
            val = live_data[instance.name].get("memory", "?")
2323 a8083063 Iustin Pop
          else:
2324 a8083063 Iustin Pop
            val = "-"
2325 a8083063 Iustin Pop
        elif field == "disk_template":
2326 a8083063 Iustin Pop
          val = instance.disk_template
2327 a8083063 Iustin Pop
        elif field == "ip":
2328 a8083063 Iustin Pop
          val = instance.nics[0].ip
2329 a8083063 Iustin Pop
        elif field == "bridge":
2330 a8083063 Iustin Pop
          val = instance.nics[0].bridge
2331 a8083063 Iustin Pop
        elif field == "mac":
2332 a8083063 Iustin Pop
          val = instance.nics[0].mac
2333 644eeef9 Iustin Pop
        elif field == "sda_size" or field == "sdb_size":
2334 644eeef9 Iustin Pop
          disk = instance.FindDisk(field[:3])
2335 644eeef9 Iustin Pop
          if disk is None:
2336 8a23d2d3 Iustin Pop
            val = None
2337 644eeef9 Iustin Pop
          else:
2338 644eeef9 Iustin Pop
            val = disk.size
2339 a8083063 Iustin Pop
        else:
2340 3ecf6786 Iustin Pop
          raise errors.ParameterError(field)
2341 a8083063 Iustin Pop
        iout.append(val)
2342 a8083063 Iustin Pop
      output.append(iout)
2343 a8083063 Iustin Pop
2344 a8083063 Iustin Pop
    return output
2345 a8083063 Iustin Pop
2346 a8083063 Iustin Pop
2347 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
2348 a8083063 Iustin Pop
  """Failover an instance.
2349 a8083063 Iustin Pop

2350 a8083063 Iustin Pop
  """
2351 a8083063 Iustin Pop
  HPATH = "instance-failover"
2352 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2353 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "ignore_consistency"]
2354 a8083063 Iustin Pop
2355 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2356 a8083063 Iustin Pop
    """Build hooks env.
2357 a8083063 Iustin Pop

2358 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2359 a8083063 Iustin Pop

2360 a8083063 Iustin Pop
    """
2361 a8083063 Iustin Pop
    env = {
2362 a8083063 Iustin Pop
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
2363 a8083063 Iustin Pop
      }
2364 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
2365 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
2366 a8083063 Iustin Pop
    return env, nl, nl
2367 a8083063 Iustin Pop
2368 a8083063 Iustin Pop
  def CheckPrereq(self):
2369 a8083063 Iustin Pop
    """Check prerequisites.
2370 a8083063 Iustin Pop

2371 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2372 a8083063 Iustin Pop

2373 a8083063 Iustin Pop
    """
2374 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2375 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2376 a8083063 Iustin Pop
    if instance is None:
2377 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2378 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2379 a8083063 Iustin Pop
2380 2a710df1 Michael Hanselmann
    if instance.disk_template != constants.DT_REMOTE_RAID1:
2381 2a710df1 Michael Hanselmann
      raise errors.OpPrereqError("Instance's disk layout is not"
2382 2a710df1 Michael Hanselmann
                                 " remote_raid1.")
2383 2a710df1 Michael Hanselmann
2384 2a710df1 Michael Hanselmann
    secondary_nodes = instance.secondary_nodes
2385 2a710df1 Michael Hanselmann
    if not secondary_nodes:
2386 2a710df1 Michael Hanselmann
      raise errors.ProgrammerError("no secondary node but using "
2387 2a710df1 Michael Hanselmann
                                   "DT_REMOTE_RAID1 template")
2388 2a710df1 Michael Hanselmann
2389 3a7c308e Guido Trotter
    # check memory requirements on the secondary node
2390 2a710df1 Michael Hanselmann
    target_node = secondary_nodes[0]
2391 3a7c308e Guido Trotter
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2392 3a7c308e Guido Trotter
    info = nodeinfo.get(target_node, None)
2393 3a7c308e Guido Trotter
    if not info:
2394 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot get current information"
2395 3ecf6786 Iustin Pop
                                 " from node '%s'" % nodeinfo)
2396 3a7c308e Guido Trotter
    if instance.memory > info['memory_free']:
2397 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Not enough memory on target node %s."
2398 3ecf6786 Iustin Pop
                                 " %d MB available, %d MB required" %
2399 3ecf6786 Iustin Pop
                                 (target_node, info['memory_free'],
2400 3ecf6786 Iustin Pop
                                  instance.memory))
2401 3a7c308e Guido Trotter
2402 a8083063 Iustin Pop
    # check bridge existence
2403 a8083063 Iustin Pop
    brlist = [nic.bridge for nic in instance.nics]
2404 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(instance.primary_node, brlist):
2405 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("One or more target bridges %s does not"
2406 3ecf6786 Iustin Pop
                                 " exist on destination node '%s'" %
2407 3ecf6786 Iustin Pop
                                 (brlist, instance.primary_node))
2408 a8083063 Iustin Pop
2409 a8083063 Iustin Pop
    self.instance = instance
2410 a8083063 Iustin Pop
2411 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2412 a8083063 Iustin Pop
    """Failover an instance.
2413 a8083063 Iustin Pop

2414 a8083063 Iustin Pop
    The failover is done by shutting it down on its present node and
2415 a8083063 Iustin Pop
    starting it on the secondary.
2416 a8083063 Iustin Pop

2417 a8083063 Iustin Pop
    """
2418 a8083063 Iustin Pop
    instance = self.instance
2419 a8083063 Iustin Pop
2420 a8083063 Iustin Pop
    source_node = instance.primary_node
2421 a8083063 Iustin Pop
    target_node = instance.secondary_nodes[0]
2422 a8083063 Iustin Pop
2423 a8083063 Iustin Pop
    feedback_fn("* checking disk consistency between source and target")
2424 a8083063 Iustin Pop
    for dev in instance.disks:
2425 a8083063 Iustin Pop
      # for remote_raid1, these are md over drbd
2426 a8083063 Iustin Pop
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
2427 a8083063 Iustin Pop
        if not self.op.ignore_consistency:
2428 3ecf6786 Iustin Pop
          raise errors.OpExecError("Disk %s is degraded on target node,"
2429 3ecf6786 Iustin Pop
                                   " aborting failover." % dev.iv_name)
2430 a8083063 Iustin Pop
2431 a8083063 Iustin Pop
    feedback_fn("* checking target node resource availability")
2432 a8083063 Iustin Pop
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
2433 a8083063 Iustin Pop
2434 a8083063 Iustin Pop
    if not nodeinfo:
2435 3ecf6786 Iustin Pop
      raise errors.OpExecError("Could not contact target node %s." %
2436 3ecf6786 Iustin Pop
                               target_node)
2437 a8083063 Iustin Pop
2438 a8083063 Iustin Pop
    free_memory = int(nodeinfo[target_node]['memory_free'])
2439 a8083063 Iustin Pop
    memory = instance.memory
2440 a8083063 Iustin Pop
    if memory > free_memory:
2441 3ecf6786 Iustin Pop
      raise errors.OpExecError("Not enough memory to create instance %s on"
2442 3ecf6786 Iustin Pop
                               " node %s. needed %s MiB, available %s MiB" %
2443 3ecf6786 Iustin Pop
                               (instance.name, target_node, memory,
2444 3ecf6786 Iustin Pop
                                free_memory))
2445 a8083063 Iustin Pop
2446 a8083063 Iustin Pop
    feedback_fn("* shutting down instance on source node")
2447 a8083063 Iustin Pop
    logger.Info("Shutting down instance %s on node %s" %
2448 a8083063 Iustin Pop
                (instance.name, source_node))
2449 a8083063 Iustin Pop
2450 a8083063 Iustin Pop
    if not rpc.call_instance_shutdown(source_node, instance):
2451 a8083063 Iustin Pop
      logger.Error("Could not shutdown instance %s on node %s. Proceeding"
2452 a8083063 Iustin Pop
                   " anyway. Please make sure node %s is down"  %
2453 a8083063 Iustin Pop
                   (instance.name, source_node, source_node))
2454 a8083063 Iustin Pop
2455 a8083063 Iustin Pop
    feedback_fn("* deactivating the instance's disks on source node")
2456 a8083063 Iustin Pop
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
2457 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't shut down the instance's disks.")
2458 a8083063 Iustin Pop
2459 a8083063 Iustin Pop
    instance.primary_node = target_node
2460 a8083063 Iustin Pop
    # distribute new instance config to the other nodes
2461 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
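    # from this point on the configuration already lists target_node as the
    # instance's primary, so a failure below leaves the instance defined on
    # the new node with its disks deactivated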
2462 a8083063 Iustin Pop
2463 a8083063 Iustin Pop
    feedback_fn("* activating the instance's disks on target node")
2464 a8083063 Iustin Pop
    logger.Info("Starting instance %s on node %s" %
2465 a8083063 Iustin Pop
                (instance.name, target_node))
2466 a8083063 Iustin Pop
2467 a8083063 Iustin Pop
    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
2468 a8083063 Iustin Pop
                                             ignore_secondaries=True)
2469 a8083063 Iustin Pop
    if not disks_ok:
2470 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2471 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't activate the instance's disks")
2472 a8083063 Iustin Pop
2473 a8083063 Iustin Pop
    feedback_fn("* starting the instance on the target node")
2474 a8083063 Iustin Pop
    if not rpc.call_instance_start(target_node, instance, None):
2475 a8083063 Iustin Pop
      _ShutdownInstanceDisks(instance, self.cfg)
2476 a8083063 Iustin Pop
      raise errors.OpExecError("Could not start instance %s on node %s." %
2477 d0b3526f Michael Hanselmann
                               (instance.name, target_node))
2478 a8083063 Iustin Pop
2479 a8083063 Iustin Pop
2480 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnPrimary(cfg, node, device, info):
2481 a8083063 Iustin Pop
  """Create a tree of block devices on the primary node.
2482 a8083063 Iustin Pop

2483 a8083063 Iustin Pop
  This always creates all devices.
2484 a8083063 Iustin Pop

2485 a8083063 Iustin Pop
  """
2486 a8083063 Iustin Pop
  if device.children:
2487 a8083063 Iustin Pop
    for child in device.children:
2488 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, node, child, info):
2489 a8083063 Iustin Pop
        return False
2490 a8083063 Iustin Pop
2491 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2492 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, True, info)
2493 a8083063 Iustin Pop
  if not new_id:
2494 a8083063 Iustin Pop
    return False
2495 a8083063 Iustin Pop
  if device.physical_id is None:
2496 a8083063 Iustin Pop
    device.physical_id = new_id
2497 a8083063 Iustin Pop
  return True
2498 a8083063 Iustin Pop
2499 a8083063 Iustin Pop
2500 a0c3fea1 Michael Hanselmann
def _CreateBlockDevOnSecondary(cfg, node, device, force, info):
2501 a8083063 Iustin Pop
  """Create a tree of block devices on a secondary node.
2502 a8083063 Iustin Pop

2503 a8083063 Iustin Pop
  If this device type has to be created on secondaries, create it and
2504 a8083063 Iustin Pop
  all its children.
2505 a8083063 Iustin Pop

2506 a8083063 Iustin Pop
  If not, just recurse to children keeping the same 'force' value.
2507 a8083063 Iustin Pop

2508 a8083063 Iustin Pop
  """
2509 a8083063 Iustin Pop
  if device.CreateOnSecondary():
2510 a8083063 Iustin Pop
    force = True
2511 a8083063 Iustin Pop
  if device.children:
2512 a8083063 Iustin Pop
    for child in device.children:
2513 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, node, child, force, info):
2514 a8083063 Iustin Pop
        return False
2515 a8083063 Iustin Pop
2516 a8083063 Iustin Pop
  if not force:
2517 a8083063 Iustin Pop
    return True
2518 a8083063 Iustin Pop
  cfg.SetDiskID(device, node)
2519 a0c3fea1 Michael Hanselmann
  new_id = rpc.call_blockdev_create(node, device, device.size, False, info)
2520 a8083063 Iustin Pop
  if not new_id:
2521 a8083063 Iustin Pop
    return False
2522 a8083063 Iustin Pop
  if device.physical_id is None:
2523 a8083063 Iustin Pop
    device.physical_id = new_id
2524 a8083063 Iustin Pop
  return True
2525 a8083063 Iustin Pop
2526 a8083063 Iustin Pop
2527 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2528 923b1523 Iustin Pop
  """Generate a suitable LV name.
2529 923b1523 Iustin Pop

2530 923b1523 Iustin Pop
  This will generate unique logical volume names, one for each given extension.
2531 923b1523 Iustin Pop

2532 923b1523 Iustin Pop
  """
2533 923b1523 Iustin Pop
  results = []
2534 923b1523 Iustin Pop
  for val in exts:
2535 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2536 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2537 923b1523 Iustin Pop
  return results
2538 923b1523 Iustin Pop
2539 923b1523 Iustin Pop
2540 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
2541 a8083063 Iustin Pop
  """Generate a drbd device complete with its children.
2542 a8083063 Iustin Pop

2543 a8083063 Iustin Pop
  """
2544 a8083063 Iustin Pop
  port = cfg.AllocatePort()
2545 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2546 a8083063 Iustin Pop
  dev_data = objects.Disk(dev_type="lvm", size=size,
2547 923b1523 Iustin Pop
                          logical_id=(vgname, names[0]))
2548 a8083063 Iustin Pop
  dev_meta = objects.Disk(dev_type="lvm", size=128,
2549 923b1523 Iustin Pop
                          logical_id=(vgname, names[1]))
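  # dev_data holds the instance data; the fixed 128 MB dev_meta volume holds
  # the DRBD metadata for this device (cf. the size checks in LUCreateInstance)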
2550 a8083063 Iustin Pop
  drbd_dev = objects.Disk(dev_type="drbd", size=size,
2551 a8083063 Iustin Pop
                          logical_id = (primary, secondary, port),
2552 a8083063 Iustin Pop
                          children = [dev_data, dev_meta])
2553 a8083063 Iustin Pop
  return drbd_dev
2554 a8083063 Iustin Pop
2555 a8083063 Iustin Pop
2556 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
2557 a8083063 Iustin Pop
                          instance_name, primary_node,
2558 a8083063 Iustin Pop
                          secondary_nodes, disk_sz, swap_sz):
2559 a8083063 Iustin Pop
  """Generate the entire disk layout for a given template type.
2560 a8083063 Iustin Pop

2561 a8083063 Iustin Pop
  """
2562 a8083063 Iustin Pop
  #TODO: compute space requirements
2563 a8083063 Iustin Pop
2564 923b1523 Iustin Pop
  vgname = cfg.GetVGName()
2565 a8083063 Iustin Pop
  if template_name == "diskless":
2566 a8083063 Iustin Pop
    disks = []
2567 a8083063 Iustin Pop
  elif template_name == "plain":
2568 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2569 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2570 923b1523 Iustin Pop
2571 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
2572 a8083063 Iustin Pop
    sda_dev = objects.Disk(dev_type="lvm", size=disk_sz,
2573 923b1523 Iustin Pop
                           logical_id=(vgname, names[0]),
2574 a8083063 Iustin Pop
                           iv_name = "sda")
2575 a8083063 Iustin Pop
    sdb_dev = objects.Disk(dev_type="lvm", size=swap_sz,
2576 923b1523 Iustin Pop
                           logical_id=(vgname, names[1]),
2577 a8083063 Iustin Pop
                           iv_name = "sdb")
2578 a8083063 Iustin Pop
    disks = [sda_dev, sdb_dev]
2579 a8083063 Iustin Pop
  elif template_name == "local_raid1":
2580 a8083063 Iustin Pop
    if len(secondary_nodes) != 0:
2581 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2582 923b1523 Iustin Pop
2583 923b1523 Iustin Pop
2584 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
2585 923b1523 Iustin Pop
                                       ".sdb_m1", ".sdb_m2"])
2586 a8083063 Iustin Pop
    sda_dev_m1 = objects.Disk(dev_type="lvm", size=disk_sz,
2587 923b1523 Iustin Pop
                              logical_id=(vgname, names[0]))
2588 a8083063 Iustin Pop
    sda_dev_m2 = objects.Disk(dev_type="lvm", size=disk_sz,
2589 923b1523 Iustin Pop
                              logical_id=(vgname, names[1]))
2590 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name = "sda",
2591 a8083063 Iustin Pop
                              size=disk_sz,
2592 a8083063 Iustin Pop
                              children = [sda_dev_m1, sda_dev_m2])
2593 a8083063 Iustin Pop
    sdb_dev_m1 = objects.Disk(dev_type="lvm", size=swap_sz,
2594 923b1523 Iustin Pop
                              logical_id=(vgname, names[2]))
2595 a8083063 Iustin Pop
    sdb_dev_m2 = objects.Disk(dev_type="lvm", size=swap_sz,
2596 923b1523 Iustin Pop
                              logical_id=(vgname, names[3]))
2597 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name = "sdb",
2598 a8083063 Iustin Pop
                              size=swap_sz,
2599 a8083063 Iustin Pop
                              children = [sdb_dev_m1, sdb_dev_m2])
2600 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2601 2a710df1 Michael Hanselmann
  elif template_name == constants.DT_REMOTE_RAID1:
2602 a8083063 Iustin Pop
    if len(secondary_nodes) != 1:
2603 a8083063 Iustin Pop
      raise errors.ProgrammerError("Wrong template configuration")
2604 a8083063 Iustin Pop
    remote_node = secondary_nodes[0]
2605 923b1523 Iustin Pop
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
2606 923b1523 Iustin Pop
                                       ".sdb_data", ".sdb_meta"])
2607 923b1523 Iustin Pop
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2608 923b1523 Iustin Pop
                                         disk_sz, names[0:2])
2609 a8083063 Iustin Pop
    md_sda_dev = objects.Disk(dev_type="md_raid1", iv_name="sda",
2610 a8083063 Iustin Pop
                              children = [drbd_sda_dev], size=disk_sz)
2611 923b1523 Iustin Pop
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
2612 923b1523 Iustin Pop
                                         swap_sz, names[2:4])
2613 a8083063 Iustin Pop
    md_sdb_dev = objects.Disk(dev_type="md_raid1", iv_name="sdb",
2614 a8083063 Iustin Pop
                              children = [drbd_sdb_dev], size=swap_sz)
2615 a8083063 Iustin Pop
    disks = [md_sda_dev, md_sdb_dev]
2616 a8083063 Iustin Pop
  else:
2617 a8083063 Iustin Pop
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
2618 a8083063 Iustin Pop
  return disks
2619 a8083063 Iustin Pop
2620 a8083063 Iustin Pop
2621 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2622 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2623 3ecf6786 Iustin Pop

2624 3ecf6786 Iustin Pop
  """
2625 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2626 a0c3fea1 Michael Hanselmann
2627 a0c3fea1 Michael Hanselmann
2628 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
2629 a8083063 Iustin Pop
  """Create all disks for an instance.
2630 a8083063 Iustin Pop

2631 a8083063 Iustin Pop
  This abstracts away some work from AddInstance.
2632 a8083063 Iustin Pop

2633 a8083063 Iustin Pop
  Args:
2634 a8083063 Iustin Pop
    instance: the instance object
2635 a8083063 Iustin Pop

2636 a8083063 Iustin Pop
  Returns:
2637 a8083063 Iustin Pop
    True or False showing the success of the creation process
2638 a8083063 Iustin Pop

2639 a8083063 Iustin Pop
  """
2640 a0c3fea1 Michael Hanselmann
  info = _GetInstanceInfoText(instance)
2641 a0c3fea1 Michael Hanselmann
2642 a8083063 Iustin Pop
  for device in instance.disks:
2643 a8083063 Iustin Pop
    logger.Info("creating volume %s for instance %s" %
2644 a8083063 Iustin Pop
              (device.iv_name, instance.name))
2645 a8083063 Iustin Pop
    #HARDCODE
2646 a8083063 Iustin Pop
    for secondary_node in instance.secondary_nodes:
2647 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, secondary_node, device, False,
2648 a0c3fea1 Michael Hanselmann
                                        info):
2649 a8083063 Iustin Pop
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
2650 a8083063 Iustin Pop
                     (device.iv_name, device, secondary_node))
2651 a8083063 Iustin Pop
        return False
2652 a8083063 Iustin Pop
    #HARDCODE
2653 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, device, info):
2654 a8083063 Iustin Pop
      logger.Error("failed to create volume %s on primary!" %
2655 a8083063 Iustin Pop
                   device.iv_name)
2656 a8083063 Iustin Pop
      return False
2657 a8083063 Iustin Pop
  return True
2658 a8083063 Iustin Pop
2659 a8083063 Iustin Pop
2660 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
2661 a8083063 Iustin Pop
  """Remove all disks for an instance.
2662 a8083063 Iustin Pop

2663 a8083063 Iustin Pop
  This abstracts away some work from `AddInstance()` and
2664 a8083063 Iustin Pop
  `RemoveInstance()`. Note that in case some of the devices couldn't
2665 a8083063 Iustin Pop
  be removed, the removal will continue with the other ones (compare
2666 a8083063 Iustin Pop
  with `_CreateDisks()`).
2667 a8083063 Iustin Pop

2668 a8083063 Iustin Pop
  Args:
2669 a8083063 Iustin Pop
    instance: the instance object
2670 a8083063 Iustin Pop

2671 a8083063 Iustin Pop
  Returns:
2672 a8083063 Iustin Pop
    True or False showing the success of the removal process
2673 a8083063 Iustin Pop

2674 a8083063 Iustin Pop
  """
2675 a8083063 Iustin Pop
  logger.Info("removing block devices for instance %s" % instance.name)
2676 a8083063 Iustin Pop
2677 a8083063 Iustin Pop
  result = True
2678 a8083063 Iustin Pop
  for device in instance.disks:
2679 a8083063 Iustin Pop
    for node, disk in device.ComputeNodeTree(instance.primary_node):
2680 a8083063 Iustin Pop
      cfg.SetDiskID(disk, node)
2681 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, disk):
2682 a8083063 Iustin Pop
        logger.Error("could not remove block device %s on node %s,"
2683 a8083063 Iustin Pop
                     " continuing anyway" %
2684 a8083063 Iustin Pop
                     (device.iv_name, node))
2685 a8083063 Iustin Pop
        result = False
2686 a8083063 Iustin Pop
  return result
2687 a8083063 Iustin Pop
2688 a8083063 Iustin Pop
2689 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
2690 a8083063 Iustin Pop
  """Create an instance.
2691 a8083063 Iustin Pop

2692 a8083063 Iustin Pop
  """
2693 a8083063 Iustin Pop
  HPATH = "instance-add"
2694 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
2695 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
2696 a8083063 Iustin Pop
              "disk_template", "swap_size", "mode", "start", "vcpus",
2697 a8083063 Iustin Pop
              "wait_for_sync"]
2698 a8083063 Iustin Pop
2699 a8083063 Iustin Pop
  def BuildHooksEnv(self):
2700 a8083063 Iustin Pop
    """Build hooks env.
2701 a8083063 Iustin Pop

2702 a8083063 Iustin Pop
    This runs on master, primary and secondary nodes of the instance.
2703 a8083063 Iustin Pop

2704 a8083063 Iustin Pop
    """
2705 a8083063 Iustin Pop
    env = {
2706 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
2707 396e1b78 Michael Hanselmann
      "INSTANCE_DISK_SIZE": self.op.disk_size,
2708 396e1b78 Michael Hanselmann
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
2709 a8083063 Iustin Pop
      "INSTANCE_ADD_MODE": self.op.mode,
2710 a8083063 Iustin Pop
      }
2711 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2712 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_NODE"] = self.op.src_node
2713 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_PATH"] = self.op.src_path
2714 396e1b78 Michael Hanselmann
      env["INSTANCE_SRC_IMAGE"] = self.src_image
2715 396e1b78 Michael Hanselmann
2716 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
2717 396e1b78 Michael Hanselmann
      primary_node=self.op.pnode,
2718 396e1b78 Michael Hanselmann
      secondary_nodes=self.secondaries,
2719 396e1b78 Michael Hanselmann
      status=self.instance_status,
2720 ecb215b5 Michael Hanselmann
      os_type=self.op.os_type,
2721 396e1b78 Michael Hanselmann
      memory=self.op.mem_size,
2722 396e1b78 Michael Hanselmann
      vcpus=self.op.vcpus,
2723 396e1b78 Michael Hanselmann
      nics=[(self.inst_ip, self.op.bridge)],
2724 396e1b78 Michael Hanselmann
    ))
2725 a8083063 Iustin Pop
2726 880478f8 Iustin Pop
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
2727 a8083063 Iustin Pop
          self.secondaries)
2728 a8083063 Iustin Pop
    return env, nl, nl
2729 a8083063 Iustin Pop
2730 a8083063 Iustin Pop
2731 a8083063 Iustin Pop
  def CheckPrereq(self):
2732 a8083063 Iustin Pop
    """Check prerequisites.
2733 a8083063 Iustin Pop

2734 a8083063 Iustin Pop
    """
2735 a8083063 Iustin Pop
    if self.op.mode not in (constants.INSTANCE_CREATE,
2736 a8083063 Iustin Pop
                            constants.INSTANCE_IMPORT):
2737 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
2738 3ecf6786 Iustin Pop
                                 self.op.mode)
2739 a8083063 Iustin Pop
2740 a8083063 Iustin Pop
    if self.op.mode == constants.INSTANCE_IMPORT:
2741 a8083063 Iustin Pop
      src_node = getattr(self.op, "src_node", None)
2742 a8083063 Iustin Pop
      src_path = getattr(self.op, "src_path", None)
2743 a8083063 Iustin Pop
      if src_node is None or src_path is None:
2744 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Importing an instance requires source"
2745 3ecf6786 Iustin Pop
                                   " node and path options")
2746 a8083063 Iustin Pop
      src_node_full = self.cfg.ExpandNodeName(src_node)
2747 a8083063 Iustin Pop
      if src_node_full is None:
2748 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
2749 a8083063 Iustin Pop
      self.op.src_node = src_node = src_node_full
2750 a8083063 Iustin Pop
2751 a8083063 Iustin Pop
      if not os.path.isabs(src_path):
2752 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The source path must be absolute")
2753 a8083063 Iustin Pop
2754 a8083063 Iustin Pop
      export_info = rpc.call_export_info(src_node, src_path)
2755 a8083063 Iustin Pop
2756 a8083063 Iustin Pop
      if not export_info:
2757 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No export found in dir %s" % src_path)
2758 a8083063 Iustin Pop
2759 a8083063 Iustin Pop
      if not export_info.has_section(constants.INISECT_EXP):
2760 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Corrupted export config")
2761 a8083063 Iustin Pop
2762 a8083063 Iustin Pop
      ei_version = export_info.get(constants.INISECT_EXP, 'version')
2763 a8083063 Iustin Pop
      if (int(ei_version) != constants.EXPORT_VERSION):
2764 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
2765 3ecf6786 Iustin Pop
                                   (ei_version, constants.EXPORT_VERSION))
2766 a8083063 Iustin Pop
2767 a8083063 Iustin Pop
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
2768 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Can't import instance with more than"
2769 3ecf6786 Iustin Pop
                                   " one data disk")
2770 a8083063 Iustin Pop
2771 a8083063 Iustin Pop
      # FIXME: are the old os-es, disk sizes, etc. useful?
2772 a8083063 Iustin Pop
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
2773 a8083063 Iustin Pop
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
2774 a8083063 Iustin Pop
                                                         'disk0_dump'))
2775 a8083063 Iustin Pop
      self.src_image = diskimage
2776 a8083063 Iustin Pop
    else: # INSTANCE_CREATE
2777 a8083063 Iustin Pop
      if getattr(self.op, "os_type", None) is None:
2778 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("No guest OS specified")
2779 a8083063 Iustin Pop
2780 a8083063 Iustin Pop
    # check primary node
2781 a8083063 Iustin Pop
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
2782 a8083063 Iustin Pop
    if pnode is None:
2783 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
2784 3ecf6786 Iustin Pop
                                 self.op.pnode)
2785 a8083063 Iustin Pop
    self.op.pnode = pnode.name
2786 a8083063 Iustin Pop
    self.pnode = pnode
2787 a8083063 Iustin Pop
    self.secondaries = []
2788 a8083063 Iustin Pop
    # disk template and mirror node verification
2789 a8083063 Iustin Pop
    if self.op.disk_template not in constants.DISK_TEMPLATES:
2790 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid disk template name")
2791 a8083063 Iustin Pop
2792 a8083063 Iustin Pop
    if self.op.disk_template == constants.DT_REMOTE_RAID1:
2793 a8083063 Iustin Pop
      if getattr(self.op, "snode", None) is None:
2794 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The 'remote_raid1' disk template needs"
2795 3ecf6786 Iustin Pop
                                   " a mirror node")
2796 a8083063 Iustin Pop
2797 a8083063 Iustin Pop
      snode_name = self.cfg.ExpandNodeName(self.op.snode)
2798 a8083063 Iustin Pop
      if snode_name is None:
2799 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
2800 3ecf6786 Iustin Pop
                                   self.op.snode)
2801 a8083063 Iustin Pop
      elif snode_name == pnode.name:
2802 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("The secondary node cannot be"
2803 3ecf6786 Iustin Pop
                                   " the primary node.")
2804 a8083063 Iustin Pop
      self.secondaries.append(snode_name)
2805 a8083063 Iustin Pop
2806 ed1ebc60 Guido Trotter
    # Check lv size requirements
2807 ed1ebc60 Guido Trotter
    nodenames = [pnode.name] + self.secondaries
2808 ed1ebc60 Guido Trotter
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())
2809 ed1ebc60 Guido Trotter
2810 ed1ebc60 Guido Trotter
    # Required free disk space as a function of disk and swap space
2811 ed1ebc60 Guido Trotter
    req_size_dict = {
2812 ed1ebc60 Guido Trotter
      constants.DT_DISKLESS: 0,
2813 ed1ebc60 Guido Trotter
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
2814 ed1ebc60 Guido Trotter
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
2815 ed1ebc60 Guido Trotter
      # 256 MB are added for drbd metadata, 128MB for each drbd device
2816 ed1ebc60 Guido Trotter
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
2817 ed1ebc60 Guido Trotter
    }
2818 ed1ebc60 Guido Trotter
2819 ed1ebc60 Guido Trotter
    if self.op.disk_template not in req_size_dict:
2820 3ecf6786 Iustin Pop
      raise errors.ProgrammerError("Disk template '%s' size requirement"
2821 3ecf6786 Iustin Pop
                                   " is unknown" %  self.op.disk_template)
2822 ed1ebc60 Guido Trotter
2823 ed1ebc60 Guido Trotter
    req_size = req_size_dict[self.op.disk_template]
2824 ed1ebc60 Guido Trotter
2825 ed1ebc60 Guido Trotter
    for node in nodenames:
2826 ed1ebc60 Guido Trotter
      info = nodeinfo.get(node, None)
2827 ed1ebc60 Guido Trotter
      if not info:
2828 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Cannot get current information"
2829 3ecf6786 Iustin Pop
                                   " from node '%s'" % nodeinfo)
2830 ed1ebc60 Guido Trotter
      if req_size > info['vg_free']:
2831 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Not enough disk space on target node %s."
2832 3ecf6786 Iustin Pop
                                   " %d MB available, %d MB required" %
2833 3ecf6786 Iustin Pop
                                   (node, info['vg_free'], req_size))
2834 ed1ebc60 Guido Trotter
2835 a8083063 Iustin Pop
    # os verification
2836 a8083063 Iustin Pop
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
2837 a8083063 Iustin Pop
    if not isinstance(os_obj, objects.OS):
2838 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
2839 3ecf6786 Iustin Pop
                                 " primary node"  % self.op.os_type)
2840 a8083063 Iustin Pop
2841 a8083063 Iustin Pop
    # instance verification
2842 a8083063 Iustin Pop
    hostname1 = utils.LookupHostname(self.op.instance_name)
2843 a8083063 Iustin Pop
    if not hostname1:
2844 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance name '%s' not found in dns" %
2845 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2846 a8083063 Iustin Pop
2847 bcf043c9 Iustin Pop
    self.op.instance_name = instance_name = hostname1.name
2848 a8083063 Iustin Pop
    instance_list = self.cfg.GetInstanceList()
2849 a8083063 Iustin Pop
    if instance_name in instance_list:
2850 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
2851 3ecf6786 Iustin Pop
                                 instance_name)
2852 a8083063 Iustin Pop
2853 a8083063 Iustin Pop
    ip = getattr(self.op, "ip", None)
2854 a8083063 Iustin Pop
    if ip is None or ip.lower() == "none":
2855 a8083063 Iustin Pop
      inst_ip = None
2856 a8083063 Iustin Pop
    elif ip.lower() == "auto":
2857 bcf043c9 Iustin Pop
      inst_ip = hostname1.ip
2858 a8083063 Iustin Pop
    else:
2859 a8083063 Iustin Pop
      if not utils.IsValidIP(ip):
2860 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
2861 3ecf6786 Iustin Pop
                                   " like a valid IP" % ip)
2862 a8083063 Iustin Pop
      inst_ip = ip
2863 a8083063 Iustin Pop
    self.inst_ip = inst_ip
2864 a8083063 Iustin Pop
2865 bcf043c9 Iustin Pop
    command = ["fping", "-q", hostname1.ip]
2866 a8083063 Iustin Pop
    result = utils.RunCmd(command)
2867 a8083063 Iustin Pop
    if not result.failed:
2868 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
2869 bcf043c9 Iustin Pop
                                 (hostname1.ip, instance_name))
2870 a8083063 Iustin Pop
2871 a8083063 Iustin Pop
    # bridge verification
2872 a8083063 Iustin Pop
    bridge = getattr(self.op, "bridge", None)
2873 a8083063 Iustin Pop
    if bridge is None:
2874 a8083063 Iustin Pop
      self.op.bridge = self.cfg.GetDefBridge()
2875 a8083063 Iustin Pop
    else:
2876 a8083063 Iustin Pop
      self.op.bridge = bridge
2877 a8083063 Iustin Pop
2878 a8083063 Iustin Pop
    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
2879 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
2880 3ecf6786 Iustin Pop
                                 " destination node '%s'" %
2881 3ecf6786 Iustin Pop
                                 (self.op.bridge, pnode.name))
2882 a8083063 Iustin Pop
2883 a8083063 Iustin Pop
    if self.op.start:
2884 a8083063 Iustin Pop
      self.instance_status = 'up'
2885 a8083063 Iustin Pop
    else:
2886 a8083063 Iustin Pop
      self.instance_status = 'down'
2887 a8083063 Iustin Pop
2888 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2889 a8083063 Iustin Pop
    """Create and add the instance to the cluster.
2890 a8083063 Iustin Pop

2891 a8083063 Iustin Pop
    """
2892 a8083063 Iustin Pop
    instance = self.op.instance_name
2893 a8083063 Iustin Pop
    pnode_name = self.pnode.name
2894 a8083063 Iustin Pop
2895 a8083063 Iustin Pop
    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
2896 a8083063 Iustin Pop
    if self.inst_ip is not None:
2897 a8083063 Iustin Pop
      nic.ip = self.inst_ip
2898 a8083063 Iustin Pop
2899 923b1523 Iustin Pop
    disks = _GenerateDiskTemplate(self.cfg,
2900 a8083063 Iustin Pop
                                  self.op.disk_template,
2901 a8083063 Iustin Pop
                                  instance, pnode_name,
2902 a8083063 Iustin Pop
                                  self.secondaries, self.op.disk_size,
2903 a8083063 Iustin Pop
                                  self.op.swap_size)
2904 a8083063 Iustin Pop
2905 a8083063 Iustin Pop
    iobj = objects.Instance(name=instance, os=self.op.os_type,
2906 a8083063 Iustin Pop
                            primary_node=pnode_name,
2907 a8083063 Iustin Pop
                            memory=self.op.mem_size,
2908 a8083063 Iustin Pop
                            vcpus=self.op.vcpus,
2909 a8083063 Iustin Pop
                            nics=[nic], disks=disks,
2910 a8083063 Iustin Pop
                            disk_template=self.op.disk_template,
2911 a8083063 Iustin Pop
                            status=self.instance_status,
2912 a8083063 Iustin Pop
                            )
2913 a8083063 Iustin Pop
2914 a8083063 Iustin Pop
    feedback_fn("* creating instance disks...")
2915 a8083063 Iustin Pop
    if not _CreateDisks(self.cfg, iobj):
2916 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2917 3ecf6786 Iustin Pop
      raise errors.OpExecError("Device creation failed, reverting...")
2918 a8083063 Iustin Pop
2919 a8083063 Iustin Pop
    feedback_fn("adding instance %s to cluster config" % instance)
2920 a8083063 Iustin Pop
2921 a8083063 Iustin Pop
    self.cfg.AddInstance(iobj)
2922 a8083063 Iustin Pop
2923 a8083063 Iustin Pop
    if self.op.wait_for_sync:
2924 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj)
2925 2a710df1 Michael Hanselmann
    elif iobj.disk_template == constants.DT_REMOTE_RAID1:
2926 a8083063 Iustin Pop
      # make sure the disks are not degraded (still sync-ing is ok)
2927 a8083063 Iustin Pop
      time.sleep(15)
2928 a8083063 Iustin Pop
      feedback_fn("* checking mirrors status")
2929 a8083063 Iustin Pop
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
2930 a8083063 Iustin Pop
    else:
2931 a8083063 Iustin Pop
      disk_abort = False
2932 a8083063 Iustin Pop
2933 a8083063 Iustin Pop
    if disk_abort:
2934 a8083063 Iustin Pop
      _RemoveDisks(iobj, self.cfg)
2935 a8083063 Iustin Pop
      self.cfg.RemoveInstance(iobj.name)
2936 3ecf6786 Iustin Pop
      raise errors.OpExecError("There are some degraded disks for"
2937 3ecf6786 Iustin Pop
                               " this instance")
2938 a8083063 Iustin Pop
2939 a8083063 Iustin Pop
    feedback_fn("creating os for instance %s on node %s" %
2940 a8083063 Iustin Pop
                (instance, pnode_name))
2941 a8083063 Iustin Pop
2942 a8083063 Iustin Pop
    if iobj.disk_template != constants.DT_DISKLESS:
2943 a8083063 Iustin Pop
      if self.op.mode == constants.INSTANCE_CREATE:
2944 a8083063 Iustin Pop
        feedback_fn("* running the instance OS create scripts...")
2945 a8083063 Iustin Pop
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
2946 3ecf6786 Iustin Pop
          raise errors.OpExecError("could not add os for instance %s"
2947 3ecf6786 Iustin Pop
                                   " on node %s" %
2948 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
2949 a8083063 Iustin Pop
2950 a8083063 Iustin Pop
      elif self.op.mode == constants.INSTANCE_IMPORT:
2951 a8083063 Iustin Pop
        feedback_fn("* running the instance OS import scripts...")
2952 a8083063 Iustin Pop
        src_node = self.op.src_node
2953 a8083063 Iustin Pop
        src_image = self.src_image
2954 a8083063 Iustin Pop
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
2955 a8083063 Iustin Pop
                                                src_node, src_image):
2956 3ecf6786 Iustin Pop
          raise errors.OpExecError("Could not import os for instance"
2957 3ecf6786 Iustin Pop
                                   " %s on node %s" %
2958 3ecf6786 Iustin Pop
                                   (instance, pnode_name))
2959 a8083063 Iustin Pop
      else:
2960 a8083063 Iustin Pop
        # also checked in the prereq part
2961 3ecf6786 Iustin Pop
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
2962 3ecf6786 Iustin Pop
                                     % self.op.mode)
2963 a8083063 Iustin Pop
2964 a8083063 Iustin Pop
    if self.op.start:
2965 a8083063 Iustin Pop
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
2966 a8083063 Iustin Pop
      feedback_fn("* starting instance...")
2967 a8083063 Iustin Pop
      if not rpc.call_instance_start(pnode_name, iobj, None):
2968 3ecf6786 Iustin Pop
        raise errors.OpExecError("Could not start instance")
2969 a8083063 Iustin Pop
2970 a8083063 Iustin Pop
2971 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
2972 a8083063 Iustin Pop
  """Connect to an instance's console.
2973 a8083063 Iustin Pop

2974 a8083063 Iustin Pop
  This is somewhat special in that it returns the command line that
2975 a8083063 Iustin Pop
  you need to run on the master node in order to connect to the
2976 a8083063 Iustin Pop
  console.
2977 a8083063 Iustin Pop

2978 a8083063 Iustin Pop
  """
2979 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
2980 a8083063 Iustin Pop
2981 a8083063 Iustin Pop
  def CheckPrereq(self):
2982 a8083063 Iustin Pop
    """Check prerequisites.
2983 a8083063 Iustin Pop

2984 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
2985 a8083063 Iustin Pop

2986 a8083063 Iustin Pop
    """
2987 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
2988 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
2989 a8083063 Iustin Pop
    if instance is None:
2990 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
2991 3ecf6786 Iustin Pop
                                 self.op.instance_name)
2992 a8083063 Iustin Pop
    self.instance = instance
2993 a8083063 Iustin Pop
2994 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
2995 a8083063 Iustin Pop
    """Connect to the console of an instance
2996 a8083063 Iustin Pop

2997 a8083063 Iustin Pop
    """
2998 a8083063 Iustin Pop
    instance = self.instance
2999 a8083063 Iustin Pop
    node = instance.primary_node
3000 a8083063 Iustin Pop
3001 a8083063 Iustin Pop
    node_insts = rpc.call_instance_list([node])[node]
3002 a8083063 Iustin Pop
    if node_insts is False:
3003 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't connect to node %s." % node)
3004 a8083063 Iustin Pop
3005 a8083063 Iustin Pop
    if instance.name not in node_insts:
3006 3ecf6786 Iustin Pop
      raise errors.OpExecError("Instance %s is not running." % instance.name)
3007 a8083063 Iustin Pop
3008 a8083063 Iustin Pop
    logger.Debug("connecting to console of %s on %s" % (instance.name, node))
3009 a8083063 Iustin Pop
3010 a8083063 Iustin Pop
    hyper = hypervisor.GetHypervisor()
3011 a8083063 Iustin Pop
    console_cmd = hyper.GetShellCommandForConsole(instance.name)
3012 82122173 Iustin Pop
    # build ssh cmdline
3013 82122173 Iustin Pop
    argv = ["ssh", "-q", "-t"]
3014 82122173 Iustin Pop
    argv.extend(ssh.KNOWN_HOSTS_OPTS)
3015 82122173 Iustin Pop
    argv.extend(ssh.BATCH_MODE_OPTS)
3016 82122173 Iustin Pop
    argv.append(node)
3017 82122173 Iustin Pop
    argv.append(console_cmd)
3018 82122173 Iustin Pop
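    # return the program name and its argument vector; the caller is
    # expected to execute this on the master node to reach the console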
    return "ssh", argv
3019 a8083063 Iustin Pop
3020 a8083063 Iustin Pop
3021 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
3022 a8083063 Iustin Pop
  """Adda new mirror member to an instance's disk.
3023 a8083063 Iustin Pop

3024 a8083063 Iustin Pop
  """
3025 a8083063 Iustin Pop
  HPATH = "mirror-add"
3026 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3027 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]
3028 a8083063 Iustin Pop
3029 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3030 a8083063 Iustin Pop
    """Build hooks env.
3031 a8083063 Iustin Pop

3032 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3033 a8083063 Iustin Pop

3034 a8083063 Iustin Pop
    """
3035 a8083063 Iustin Pop
    env = {
3036 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3037 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
3038 a8083063 Iustin Pop
      }
3039 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3040 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3041 a8083063 Iustin Pop
          self.op.remote_node,] + list(self.instance.secondary_nodes)
3042 a8083063 Iustin Pop
    return env, nl, nl
3043 a8083063 Iustin Pop
3044 a8083063 Iustin Pop
  def CheckPrereq(self):
3045 a8083063 Iustin Pop
    """Check prerequisites.
3046 a8083063 Iustin Pop

3047 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3048 a8083063 Iustin Pop

3049 a8083063 Iustin Pop
    """
3050 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3051 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3052 a8083063 Iustin Pop
    if instance is None:
3053 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3054 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3055 a8083063 Iustin Pop
    self.instance = instance
3056 a8083063 Iustin Pop
3057 a8083063 Iustin Pop
    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
3058 a8083063 Iustin Pop
    if remote_node is None:
3059 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
3060 a8083063 Iustin Pop
    self.remote_node = remote_node
3061 a8083063 Iustin Pop
3062 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3063 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3064 3ecf6786 Iustin Pop
                                 " the instance.")
3065 a8083063 Iustin Pop
3066 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3067 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3068 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3069 a8083063 Iustin Pop
    for disk in instance.disks:
3070 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
3071 a8083063 Iustin Pop
        break
3072 a8083063 Iustin Pop
    else:
3073 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3074 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
3075 a8083063 Iustin Pop
    if len(disk.children) > 1:
3076 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The device already has two slave"
3077 3ecf6786 Iustin Pop
                                 " devices.\n"
3078 3ecf6786 Iustin Pop
                                 "This would create a 3-disk raid1"
3079 3ecf6786 Iustin Pop
                                 " which we don't allow.")
3080 a8083063 Iustin Pop
    self.disk = disk
3081 a8083063 Iustin Pop
3082 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3083 a8083063 Iustin Pop
    """Add the mirror component
3084 a8083063 Iustin Pop

3085 a8083063 Iustin Pop
    """
3086 a8083063 Iustin Pop
    disk = self.disk
3087 a8083063 Iustin Pop
    instance = self.instance
3088 a8083063 Iustin Pop
3089 a8083063 Iustin Pop
    remote_node = self.remote_node
3090 923b1523 Iustin Pop
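    # allocate unique LV names for the data and metadata volumes and build
    # the new DRBD device spanning the primary node and the new secondary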
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
3091 923b1523 Iustin Pop
    names = _GenerateUniqueNames(self.cfg, lv_names)
3092 923b1523 Iustin Pop
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
3093 923b1523 Iustin Pop
                                     remote_node, disk.size, names)
3094 a8083063 Iustin Pop
3095 a8083063 Iustin Pop
    logger.Info("adding new mirror component on secondary")
3096 a8083063 Iustin Pop
    #HARDCODE
3097 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, new_drbd, False,
3098 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3099 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create new component on secondary"
3100 3ecf6786 Iustin Pop
                               " node %s" % remote_node)
3101 a8083063 Iustin Pop
3102 a8083063 Iustin Pop
    logger.Info("adding new mirror component on primary")
3103 a8083063 Iustin Pop
    #HARDCODE
3104 a0c3fea1 Michael Hanselmann
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node, new_drbd,
3105 a0c3fea1 Michael Hanselmann
                                    _GetInstanceInfoText(instance)):
3106 a8083063 Iustin Pop
      # remove secondary dev
3107 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, remote_node)
3108 a8083063 Iustin Pop
      rpc.call_blockdev_remove(remote_node, new_drbd)
3109 3ecf6786 Iustin Pop
      raise errors.OpExecError("Failed to create volume on primary")
3110 a8083063 Iustin Pop
3111 a8083063 Iustin Pop
    # the device exists now
3112 a8083063 Iustin Pop
    # call the primary node to add the mirror to md
3113 a8083063 Iustin Pop
    logger.Info("adding new mirror component to md")
3114 a8083063 Iustin Pop
    if not rpc.call_blockdev_addchild(instance.primary_node,
3115 a8083063 Iustin Pop
                                           disk, new_drbd):
3116 a8083063 Iustin Pop
      logger.Error("Can't add mirror compoment to md!")
3117 a8083063 Iustin Pop
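      # roll back: drop the half-attached DRBD device from both nodes
      # before aborting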
      self.cfg.SetDiskID(new_drbd, remote_node)
3118 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
3119 a8083063 Iustin Pop
        logger.Error("Can't rollback on secondary")
3120 a8083063 Iustin Pop
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
3121 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3122 a8083063 Iustin Pop
        logger.Error("Can't rollback on primary")
3123 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't add mirror component to md array")
3124 a8083063 Iustin Pop
3125 a8083063 Iustin Pop
    disk.children.append(new_drbd)
3126 a8083063 Iustin Pop
3127 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3128 a8083063 Iustin Pop
3129 a8083063 Iustin Pop
    _WaitForSync(self.cfg, instance)
3130 a8083063 Iustin Pop
3131 a8083063 Iustin Pop
    return 0
3132 a8083063 Iustin Pop
3133 a8083063 Iustin Pop
3134 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
3135 a8083063 Iustin Pop
  """Remove a component from a remote_raid1 disk.
3136 a8083063 Iustin Pop

3137 a8083063 Iustin Pop
  """
3138 a8083063 Iustin Pop
  HPATH = "mirror-remove"
3139 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3140 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]
3141 a8083063 Iustin Pop
3142 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3143 a8083063 Iustin Pop
    """Build hooks env.
3144 a8083063 Iustin Pop

3145 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3146 a8083063 Iustin Pop

3147 a8083063 Iustin Pop
    """
3148 a8083063 Iustin Pop
    env = {
3149 a8083063 Iustin Pop
      "DISK_NAME": self.op.disk_name,
3150 a8083063 Iustin Pop
      "DISK_ID": self.op.disk_id,
3151 a8083063 Iustin Pop
      "OLD_SECONDARY": self.old_secondary,
3152 a8083063 Iustin Pop
      }
3153 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3154 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3155 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3156 a8083063 Iustin Pop
    return env, nl, nl
3157 a8083063 Iustin Pop
3158 a8083063 Iustin Pop
  def CheckPrereq(self):
3159 a8083063 Iustin Pop
    """Check prerequisites.
3160 a8083063 Iustin Pop

3161 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3162 a8083063 Iustin Pop

3163 a8083063 Iustin Pop
    """
3164 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3165 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3166 a8083063 Iustin Pop
    if instance is None:
3167 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3168 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3169 a8083063 Iustin Pop
    self.instance = instance
3170 a8083063 Iustin Pop
3171 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3172 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3173 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3174 a8083063 Iustin Pop
    for disk in instance.disks:
3175 a8083063 Iustin Pop
      if disk.iv_name == self.op.disk_name:
3176 a8083063 Iustin Pop
        break
3177 a8083063 Iustin Pop
    else:
3178 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
3179 3ecf6786 Iustin Pop
                                 " instance." % self.op.disk_name)
3180 a8083063 Iustin Pop
    for child in disk.children:
3181 a8083063 Iustin Pop
      if child.dev_type == "drbd" and child.logical_id[2] == self.op.disk_id:
3182 a8083063 Iustin Pop
        break
3183 a8083063 Iustin Pop
    else:
3184 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Can't find the device with this port.")
3185 a8083063 Iustin Pop
3186 a8083063 Iustin Pop
    if len(disk.children) < 2:
3187 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Cannot remove the last component from"
3188 3ecf6786 Iustin Pop
                                 " a mirror.")
3189 a8083063 Iustin Pop
    self.disk = disk
3190 a8083063 Iustin Pop
    self.child = child
3191 a8083063 Iustin Pop
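    # logical_id holds the two nodes of the drbd pair; the entry that is
    # not the primary node is the old secondary we are detaching from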
    if self.child.logical_id[0] == instance.primary_node:
3192 a8083063 Iustin Pop
      oid = 1
3193 a8083063 Iustin Pop
    else:
3194 a8083063 Iustin Pop
      oid = 0
3195 a8083063 Iustin Pop
    self.old_secondary = self.child.logical_id[oid]
3196 a8083063 Iustin Pop
3197 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3198 a8083063 Iustin Pop
    """Remove the mirror component
3199 a8083063 Iustin Pop

3200 a8083063 Iustin Pop
    """
3201 a8083063 Iustin Pop
    instance = self.instance
3202 a8083063 Iustin Pop
    disk = self.disk
3203 a8083063 Iustin Pop
    child = self.child
3204 a8083063 Iustin Pop
    logger.Info("remove mirror component")
3205 a8083063 Iustin Pop
    self.cfg.SetDiskID(disk, instance.primary_node)
3206 a8083063 Iustin Pop
    if not rpc.call_blockdev_removechild(instance.primary_node,
3207 a8083063 Iustin Pop
                                              disk, child):
3208 3ecf6786 Iustin Pop
      raise errors.OpExecError("Can't remove child from mirror.")
3209 a8083063 Iustin Pop
3210 a8083063 Iustin Pop
    for node in child.logical_id[:2]:
3211 a8083063 Iustin Pop
      self.cfg.SetDiskID(child, node)
3212 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(node, child):
3213 a8083063 Iustin Pop
        logger.Error("Warning: failed to remove device from node %s,"
3214 a8083063 Iustin Pop
                     " continuing operation." % node)
3215 a8083063 Iustin Pop
3216 a8083063 Iustin Pop
    disk.children.remove(child)
3217 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3218 a8083063 Iustin Pop
3219 a8083063 Iustin Pop
3220 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
3221 a8083063 Iustin Pop
  """Replace the disks of an instance.
3222 a8083063 Iustin Pop

3223 a8083063 Iustin Pop
  """
3224 a8083063 Iustin Pop
  HPATH = "mirrors-replace"
3225 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3226 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3227 a8083063 Iustin Pop
3228 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3229 a8083063 Iustin Pop
    """Build hooks env.
3230 a8083063 Iustin Pop

3231 a8083063 Iustin Pop
    This runs on the master, the primary and all the secondaries.
3232 a8083063 Iustin Pop

3233 a8083063 Iustin Pop
    """
3234 a8083063 Iustin Pop
    env = {
3235 a8083063 Iustin Pop
      "NEW_SECONDARY": self.op.remote_node,
3236 a8083063 Iustin Pop
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
3237 a8083063 Iustin Pop
      }
3238 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3239 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3240 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3241 a8083063 Iustin Pop
    return env, nl, nl
3242 a8083063 Iustin Pop
3243 a8083063 Iustin Pop
  def CheckPrereq(self):
3244 a8083063 Iustin Pop
    """Check prerequisites.
3245 a8083063 Iustin Pop

3246 a8083063 Iustin Pop
    This checks that the instance is in the cluster.
3247 a8083063 Iustin Pop

3248 a8083063 Iustin Pop
    """
3249 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3250 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3251 a8083063 Iustin Pop
    if instance is None:
3252 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not known" %
3253 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3254 a8083063 Iustin Pop
    self.instance = instance
3255 a8083063 Iustin Pop
3256 a8083063 Iustin Pop
    if instance.disk_template != constants.DT_REMOTE_RAID1:
3257 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance's disk layout is not"
3258 3ecf6786 Iustin Pop
                                 " remote_raid1.")
3259 a8083063 Iustin Pop
3260 a8083063 Iustin Pop
    if len(instance.secondary_nodes) != 1:
3261 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The instance has a strange layout,"
3262 3ecf6786 Iustin Pop
                                 " expected one secondary but found %d" %
3263 3ecf6786 Iustin Pop
                                 len(instance.secondary_nodes))
3264 a8083063 Iustin Pop
3265 a8083063 Iustin Pop
    remote_node = getattr(self.op, "remote_node", None)
3266 a8083063 Iustin Pop
    if remote_node is None:
3267 a8083063 Iustin Pop
      remote_node = instance.secondary_nodes[0]
3268 a8083063 Iustin Pop
    else:
3269 a8083063 Iustin Pop
      remote_node = self.cfg.ExpandNodeName(remote_node)
3270 a8083063 Iustin Pop
      if remote_node is None:
3271 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Node '%s' not known" %
3272 3ecf6786 Iustin Pop
                                   self.op.remote_node)
3273 a8083063 Iustin Pop
    if remote_node == instance.primary_node:
3274 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("The specified node is the primary node of"
3275 3ecf6786 Iustin Pop
                                 " the instance.")
3276 a8083063 Iustin Pop
    self.op.remote_node = remote_node
3277 a8083063 Iustin Pop
3278 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3279 a8083063 Iustin Pop
    """Replace the disks of an instance.
3280 a8083063 Iustin Pop

3281 a8083063 Iustin Pop
    """
3282 a8083063 Iustin Pop
    instance = self.instance
3283 a8083063 Iustin Pop
    iv_names = {}
3284 a8083063 Iustin Pop
    # start of work
3285 a8083063 Iustin Pop
    remote_node = self.op.remote_node
3286 a8083063 Iustin Pop
    cfg = self.cfg
3287 a8083063 Iustin Pop
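    # for each disk: create a fresh DRBD mirror towards remote_node, attach
    # it to the MD device, and remember the old child for removal below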
    for dev in instance.disks:
3288 a8083063 Iustin Pop
      size = dev.size
3289 923b1523 Iustin Pop
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
3290 923b1523 Iustin Pop
      names = _GenerateUniqueNames(cfg, lv_names)
3291 923b1523 Iustin Pop
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
3292 923b1523 Iustin Pop
                                       remote_node, size, names)
3293 a8083063 Iustin Pop
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
3294 a8083063 Iustin Pop
      logger.Info("adding new mirror component on secondary for %s" %
3295 a8083063 Iustin Pop
                  dev.iv_name)
3296 a8083063 Iustin Pop
      #HARDCODE
3297 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnSecondary(cfg, remote_node, new_drbd, False,
3298 a0c3fea1 Michael Hanselmann
                                        _GetInstanceInfoText(instance)):
3299 3ecf6786 Iustin Pop
        raise errors.OpExecError("Failed to create new component on"
3300 3ecf6786 Iustin Pop
                                 " secondary node %s\n"
3301 3ecf6786 Iustin Pop
                                 "Full abort, cleanup manually!" %
3302 3ecf6786 Iustin Pop
                                 remote_node)
3303 a8083063 Iustin Pop
3304 a8083063 Iustin Pop
      logger.Info("adding new mirror component on primary")
3305 a8083063 Iustin Pop
      #HARDCODE
3306 a0c3fea1 Michael Hanselmann
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node, new_drbd,
3307 a0c3fea1 Michael Hanselmann
                                      _GetInstanceInfoText(instance)):
3308 a8083063 Iustin Pop
        # remove secondary dev
3309 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3310 a8083063 Iustin Pop
        rpc.call_blockdev_remove(remote_node, new_drbd)
3311 a8083063 Iustin Pop
        raise errors.OpExecError("Failed to create volume on primary!\n"
3312 a8083063 Iustin Pop
                                 "Full abort, cleanup manually!!")
3313 a8083063 Iustin Pop
3314 a8083063 Iustin Pop
      # the device exists now
3315 a8083063 Iustin Pop
      # call the primary node to add the mirror to md
3316 a8083063 Iustin Pop
      logger.Info("adding new mirror component to md")
3317 a8083063 Iustin Pop
      if not rpc.call_blockdev_addchild(instance.primary_node, dev,
3318 880478f8 Iustin Pop
                                        new_drbd):
3319 a8083063 Iustin Pop
        logger.Error("Can't add mirror compoment to md!")
3320 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, remote_node)
3321 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
3322 a8083063 Iustin Pop
          logger.Error("Can't rollback on secondary")
3323 a8083063 Iustin Pop
        cfg.SetDiskID(new_drbd, instance.primary_node)
3324 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
3325 a8083063 Iustin Pop
          logger.Error("Can't rollback on primary")
3326 3ecf6786 Iustin Pop
        raise errors.OpExecError("Full abort, cleanup manually!!")
3327 a8083063 Iustin Pop
3328 a8083063 Iustin Pop
      dev.children.append(new_drbd)
3329 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3330 a8083063 Iustin Pop
3331 a8083063 Iustin Pop
    # this can fail as the old devices are degraded and _WaitForSync
3332 a8083063 Iustin Pop
    # does a combined result over all disks, so we don't check its
3333 a8083063 Iustin Pop
    # return value
3334 a8083063 Iustin Pop
    _WaitForSync(cfg, instance, unlock=True)
3335 a8083063 Iustin Pop
3336 a8083063 Iustin Pop
    # so check manually all the devices
3337 a8083063 Iustin Pop
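    # query both the MD device and the newly attached DRBD child on the
    # primary node; element 5 of the call_blockdev_find result is used as
    # the degraded flag here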
    for name in iv_names:
3338 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3339 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3340 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
3341 a8083063 Iustin Pop
      if is_degr:
3342 3ecf6786 Iustin Pop
        raise errors.OpExecError("MD device %s is degraded!" % name)
3343 a8083063 Iustin Pop
      cfg.SetDiskID(new_drbd, instance.primary_node)
3344 a8083063 Iustin Pop
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
3345 a8083063 Iustin Pop
      if is_degr:
3346 3ecf6786 Iustin Pop
        raise errors.OpExecError("New drbd device %s is degraded!" % name)
3347 a8083063 Iustin Pop
3348 a8083063 Iustin Pop
    for name in iv_names:
3349 a8083063 Iustin Pop
      dev, child, new_drbd = iv_names[name]
3350 a8083063 Iustin Pop
      logger.Info("remove mirror %s component" % name)
3351 a8083063 Iustin Pop
      cfg.SetDiskID(dev, instance.primary_node)
3352 a8083063 Iustin Pop
      if not rpc.call_blockdev_removechild(instance.primary_node,
3353 a8083063 Iustin Pop
                                                dev, child):
3354 a8083063 Iustin Pop
        logger.Error("Can't remove child from mirror, aborting"
3355 a8083063 Iustin Pop
                     " *this device cleanup*.\nYou need to cleanup manually!!")
3356 a8083063 Iustin Pop
        continue
3357 a8083063 Iustin Pop
3358 a8083063 Iustin Pop
      for node in child.logical_id[:2]:
3359 a8083063 Iustin Pop
        logger.Info("remove child device on %s" % node)
3360 a8083063 Iustin Pop
        cfg.SetDiskID(child, node)
3361 a8083063 Iustin Pop
        if not rpc.call_blockdev_remove(node, child):
3362 a8083063 Iustin Pop
          logger.Error("Warning: failed to remove device from node %s,"
3363 a8083063 Iustin Pop
                       " continuing operation." % node)
3364 a8083063 Iustin Pop
3365 a8083063 Iustin Pop
      dev.children.remove(child)
3366 a8083063 Iustin Pop
3367 a8083063 Iustin Pop
      cfg.AddInstance(instance)
3368 a8083063 Iustin Pop
3369 a8083063 Iustin Pop
3370 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
3371 a8083063 Iustin Pop
  """Query runtime instance data.
3372 a8083063 Iustin Pop

3373 a8083063 Iustin Pop
  """
3374 a8083063 Iustin Pop
  _OP_REQP = ["instances"]
3375 a8083063 Iustin Pop
3376 a8083063 Iustin Pop
  def CheckPrereq(self):
3377 a8083063 Iustin Pop
    """Check prerequisites.
3378 a8083063 Iustin Pop

3379 a8083063 Iustin Pop
    This only checks the optional instance list against the existing names.
3380 a8083063 Iustin Pop

3381 a8083063 Iustin Pop
    """
3382 a8083063 Iustin Pop
    if not isinstance(self.op.instances, list):
3383 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Invalid argument type 'instances'")
3384 a8083063 Iustin Pop
    if self.op.instances:
3385 a8083063 Iustin Pop
      self.wanted_instances = []
3386 a8083063 Iustin Pop
      names = self.op.instances
3387 a8083063 Iustin Pop
      for name in names:
3388 a8083063 Iustin Pop
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
3389 a8083063 Iustin Pop
        if instance is None:
3390 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("No such instance name '%s'" % name)
3391 a8083063 Iustin Pop
        self.wanted_instances.append(instance)
3392 a8083063 Iustin Pop
    else:
3393 a8083063 Iustin Pop
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
3394 a8083063 Iustin Pop
                               in self.cfg.GetInstanceList()]
3395 a8083063 Iustin Pop
    return
3396 a8083063 Iustin Pop
3397 a8083063 Iustin Pop
3398 a8083063 Iustin Pop
  def _ComputeDiskStatus(self, instance, snode, dev):
3399 a8083063 Iustin Pop
    """Compute block device status.
3400 a8083063 Iustin Pop

3401 a8083063 Iustin Pop
    """
3402 a8083063 Iustin Pop
    self.cfg.SetDiskID(dev, instance.primary_node)
3403 a8083063 Iustin Pop
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
3404 a8083063 Iustin Pop
    if dev.dev_type == "drbd":
3405 a8083063 Iustin Pop
      # we change the snode then (otherwise we use the one passed in)
3406 a8083063 Iustin Pop
      if dev.logical_id[0] == instance.primary_node:
3407 a8083063 Iustin Pop
        snode = dev.logical_id[1]
3408 a8083063 Iustin Pop
      else:
3409 a8083063 Iustin Pop
        snode = dev.logical_id[0]
3410 a8083063 Iustin Pop
3411 a8083063 Iustin Pop
    if snode:
3412 a8083063 Iustin Pop
      self.cfg.SetDiskID(dev, snode)
3413 a8083063 Iustin Pop
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
3414 a8083063 Iustin Pop
    else:
3415 a8083063 Iustin Pop
      dev_sstatus = None
3416 a8083063 Iustin Pop
3417 a8083063 Iustin Pop
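    # recurse into child devices so the caller gets the full device tree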
    if dev.children:
3418 a8083063 Iustin Pop
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
3419 a8083063 Iustin Pop
                      for child in dev.children]
3420 a8083063 Iustin Pop
    else:
3421 a8083063 Iustin Pop
      dev_children = []
3422 a8083063 Iustin Pop
3423 a8083063 Iustin Pop
    data = {
3424 a8083063 Iustin Pop
      "iv_name": dev.iv_name,
3425 a8083063 Iustin Pop
      "dev_type": dev.dev_type,
3426 a8083063 Iustin Pop
      "logical_id": dev.logical_id,
3427 a8083063 Iustin Pop
      "physical_id": dev.physical_id,
3428 a8083063 Iustin Pop
      "pstatus": dev_pstatus,
3429 a8083063 Iustin Pop
      "sstatus": dev_sstatus,
3430 a8083063 Iustin Pop
      "children": dev_children,
3431 a8083063 Iustin Pop
      }
3432 a8083063 Iustin Pop
3433 a8083063 Iustin Pop
    return data
3434 a8083063 Iustin Pop
3435 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3436 a8083063 Iustin Pop
    """Gather and return data"""
3437 a8083063 Iustin Pop
    result = {}
3438 a8083063 Iustin Pop
    for instance in self.wanted_instances:
3439 a8083063 Iustin Pop
      remote_info = rpc.call_instance_info(instance.primary_node,
3440 a8083063 Iustin Pop
                                                instance.name)
3441 a8083063 Iustin Pop
      if remote_info and "state" in remote_info:
3442 a8083063 Iustin Pop
        remote_state = "up"
3443 a8083063 Iustin Pop
      else:
3444 a8083063 Iustin Pop
        remote_state = "down"
3445 a8083063 Iustin Pop
      if instance.status == "down":
3446 a8083063 Iustin Pop
        config_state = "down"
3447 a8083063 Iustin Pop
      else:
3448 a8083063 Iustin Pop
        config_state = "up"
3449 a8083063 Iustin Pop
3450 a8083063 Iustin Pop
      disks = [self._ComputeDiskStatus(instance, None, device)
3451 a8083063 Iustin Pop
               for device in instance.disks]
3452 a8083063 Iustin Pop
3453 a8083063 Iustin Pop
      idict = {
3454 a8083063 Iustin Pop
        "name": instance.name,
3455 a8083063 Iustin Pop
        "config_state": config_state,
3456 a8083063 Iustin Pop
        "run_state": remote_state,
3457 a8083063 Iustin Pop
        "pnode": instance.primary_node,
3458 a8083063 Iustin Pop
        "snodes": instance.secondary_nodes,
3459 a8083063 Iustin Pop
        "os": instance.os,
3460 a8083063 Iustin Pop
        "memory": instance.memory,
3461 a8083063 Iustin Pop
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
3462 a8083063 Iustin Pop
        "disks": disks,
3463 a8083063 Iustin Pop
        }
3464 a8083063 Iustin Pop
3465 a8083063 Iustin Pop
      result[instance.name] = idict
3466 a8083063 Iustin Pop
3467 a8083063 Iustin Pop
    return result
3468 a8083063 Iustin Pop
3469 a8083063 Iustin Pop
3470 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
3471 a8083063 Iustin Pop
  """Modifies an instances's parameters.
3472 a8083063 Iustin Pop

3473 a8083063 Iustin Pop
  """
3474 a8083063 Iustin Pop
  HPATH = "instance-modify"
3475 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3476 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3477 a8083063 Iustin Pop
3478 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3479 a8083063 Iustin Pop
    """Build hooks env.
3480 a8083063 Iustin Pop

3481 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
3482 a8083063 Iustin Pop

3483 a8083063 Iustin Pop
    """
3484 396e1b78 Michael Hanselmann
    args = dict()
3485 a8083063 Iustin Pop
    if self.mem:
3486 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
3487 a8083063 Iustin Pop
    if self.vcpus:
3488 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
3489 396e1b78 Michael Hanselmann
    if self.do_ip or self.do_bridge:
3490 396e1b78 Michael Hanselmann
      if self.do_ip:
3491 396e1b78 Michael Hanselmann
        ip = self.ip
3492 396e1b78 Michael Hanselmann
      else:
3493 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
3494 396e1b78 Michael Hanselmann
      if self.bridge:
3495 396e1b78 Michael Hanselmann
        bridge = self.bridge
3496 396e1b78 Michael Hanselmann
      else:
3497 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
3498 396e1b78 Michael Hanselmann
      args['nics'] = [(ip, bridge)]
3499 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
3500 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3501 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3502 a8083063 Iustin Pop
    return env, nl, nl
3503 a8083063 Iustin Pop
3504 a8083063 Iustin Pop
  def CheckPrereq(self):
3505 a8083063 Iustin Pop
    """Check prerequisites.
3506 a8083063 Iustin Pop

3507 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
3508 a8083063 Iustin Pop

3509 a8083063 Iustin Pop
    """
3510 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
3511 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
3512 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
3513 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
3514 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3515 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
3516 a8083063 Iustin Pop
    if self.mem is not None:
3517 a8083063 Iustin Pop
      try:
3518 a8083063 Iustin Pop
        self.mem = int(self.mem)
3519 a8083063 Iustin Pop
      except ValueError, err:
3520 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
3521 a8083063 Iustin Pop
    if self.vcpus is not None:
3522 a8083063 Iustin Pop
      try:
3523 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
3524 a8083063 Iustin Pop
      except ValueError, err:
3525 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
3526 a8083063 Iustin Pop
    if self.ip is not None:
3527 a8083063 Iustin Pop
      self.do_ip = True
3528 a8083063 Iustin Pop
      if self.ip.lower() == "none":
3529 a8083063 Iustin Pop
        self.ip = None
3530 a8083063 Iustin Pop
      else:
3531 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
3532 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
3533 a8083063 Iustin Pop
    else:
3534 a8083063 Iustin Pop
      self.do_ip = False
3535 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
3536 a8083063 Iustin Pop
3537 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3538 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3539 a8083063 Iustin Pop
    if instance is None:
3540 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
3541 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3542 a8083063 Iustin Pop
    self.op.instance_name = instance.name
3543 a8083063 Iustin Pop
    self.instance = instance
3544 a8083063 Iustin Pop
    return
3545 a8083063 Iustin Pop
3546 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3547 a8083063 Iustin Pop
    """Modifies an instance.
3548 a8083063 Iustin Pop

3549 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
3550 a8083063 Iustin Pop
    """
3551 a8083063 Iustin Pop
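    # collect (parameter, new value) pairs to report back to the caller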
    result = []
3552 a8083063 Iustin Pop
    instance = self.instance
3553 a8083063 Iustin Pop
    if self.mem:
3554 a8083063 Iustin Pop
      instance.memory = self.mem
3555 a8083063 Iustin Pop
      result.append(("mem", self.mem))
3556 a8083063 Iustin Pop
    if self.vcpus:
3557 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
3558 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
3559 a8083063 Iustin Pop
    if self.do_ip:
3560 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
3561 a8083063 Iustin Pop
      result.append(("ip", self.ip))
3562 a8083063 Iustin Pop
    if self.bridge:
3563 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
3564 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
3565 a8083063 Iustin Pop
3566 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3567 a8083063 Iustin Pop
3568 a8083063 Iustin Pop
    return result
3569 a8083063 Iustin Pop
3570 a8083063 Iustin Pop
3571 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
3572 a8083063 Iustin Pop
  """Query the exports list
3573 a8083063 Iustin Pop

3574 a8083063 Iustin Pop
  """
3575 a8083063 Iustin Pop
  _OP_REQP = []
3576 a8083063 Iustin Pop
3577 a8083063 Iustin Pop
  def CheckPrereq(self):
3578 a8083063 Iustin Pop
    """Check that the nodelist contains only existing nodes.
3579 a8083063 Iustin Pop

3580 a8083063 Iustin Pop
    """
3581 dcb93971 Michael Hanselmann
    self.nodes = _GetWantedNodes(self, getattr(self.op, "nodes", None))
3582 a8083063 Iustin Pop
3583 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3584 a8083063 Iustin Pop
    """Compute the list of all the exported system images.
3585 a8083063 Iustin Pop

3586 a8083063 Iustin Pop
    Returns:
3587 a8083063 Iustin Pop
      a dictionary with the structure node->(export-list)
3588 a8083063 Iustin Pop
      where export-list is a list of the instances exported on
3589 a8083063 Iustin Pop
      that node.
3590 a8083063 Iustin Pop

3591 a8083063 Iustin Pop
    """
3592 a7ba5e53 Iustin Pop
    return rpc.call_export_list(self.nodes)
3593 a8083063 Iustin Pop
3594 a8083063 Iustin Pop
3595 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
3596 a8083063 Iustin Pop
  """Export an instance to an image in the cluster.
3597 a8083063 Iustin Pop

3598 a8083063 Iustin Pop
  """
3599 a8083063 Iustin Pop
  HPATH = "instance-export"
3600 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3601 a8083063 Iustin Pop
  _OP_REQP = ["instance_name", "target_node", "shutdown"]
3602 a8083063 Iustin Pop
3603 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3604 a8083063 Iustin Pop
    """Build hooks env.
3605 a8083063 Iustin Pop

3606 a8083063 Iustin Pop
    This will run on the master, primary node and target node.
3607 a8083063 Iustin Pop

3608 a8083063 Iustin Pop
    """
3609 a8083063 Iustin Pop
    env = {
3610 a8083063 Iustin Pop
      "EXPORT_NODE": self.op.target_node,
3611 a8083063 Iustin Pop
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
3612 a8083063 Iustin Pop
      }
3613 396e1b78 Michael Hanselmann
    env.update(_BuildInstanceHookEnvByObject(self.instance))
3614 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
3615 a8083063 Iustin Pop
          self.op.target_node]
3616 a8083063 Iustin Pop
    return env, nl, nl
3617 a8083063 Iustin Pop
3618 a8083063 Iustin Pop
  def CheckPrereq(self):
3619 a8083063 Iustin Pop
    """Check prerequisites.
3620 a8083063 Iustin Pop

3621 a8083063 Iustin Pop
    This checks that the instance name is a valid one.
3622 a8083063 Iustin Pop

3623 a8083063 Iustin Pop
    """
3624 a8083063 Iustin Pop
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
3625 a8083063 Iustin Pop
    self.instance = self.cfg.GetInstanceInfo(instance_name)
3626 a8083063 Iustin Pop
    if self.instance is None:
3627 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Instance '%s' not found" %
3628 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3629 a8083063 Iustin Pop
3630 a8083063 Iustin Pop
    # node verification
3631 a8083063 Iustin Pop
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
3632 a8083063 Iustin Pop
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)
3633 a8083063 Iustin Pop
3634 a8083063 Iustin Pop
    if self.dst_node is None:
3635 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
3636 3ecf6786 Iustin Pop
                                 self.op.target_node)
3637 a8083063 Iustin Pop
    self.op.target_node = self.dst_node.name
3638 a8083063 Iustin Pop
3639 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3640 a8083063 Iustin Pop
    """Export an instance to an image in the cluster.
3641 a8083063 Iustin Pop

3642 a8083063 Iustin Pop
    """
3643 a8083063 Iustin Pop
    instance = self.instance
3644 a8083063 Iustin Pop
    dst_node = self.dst_node
3645 a8083063 Iustin Pop
    src_node = instance.primary_node
3646 a8083063 Iustin Pop
    # shutdown the instance, unless requested not to do so
3647 a8083063 Iustin Pop
    if self.op.shutdown:
3648 a8083063 Iustin Pop
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
3649 a8083063 Iustin Pop
      self.processor.ChainOpCode(op, feedback_fn)
3650 a8083063 Iustin Pop
3651 a8083063 Iustin Pop
    vgname = self.cfg.GetVGName()
3652 a8083063 Iustin Pop
3653 a8083063 Iustin Pop
    snap_disks = []
3654 a8083063 Iustin Pop
3655 a8083063 Iustin Pop
    try:
3656 a8083063 Iustin Pop
      for disk in instance.disks:
3657 a8083063 Iustin Pop
        if disk.iv_name == "sda":
3658 a8083063 Iustin Pop
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
3659 a8083063 Iustin Pop
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)
3660 a8083063 Iustin Pop
3661 a8083063 Iustin Pop
          if not new_dev_name:
3662 a8083063 Iustin Pop
            logger.Error("could not snapshot block device %s on node %s" %
3663 a8083063 Iustin Pop
                         (disk.logical_id[1], src_node))
3664 a8083063 Iustin Pop
          else:
3665 a8083063 Iustin Pop
            new_dev = objects.Disk(dev_type="lvm", size=disk.size,
3666 a8083063 Iustin Pop
                                      logical_id=(vgname, new_dev_name),
3667 a8083063 Iustin Pop
                                      physical_id=(vgname, new_dev_name),
3668 a8083063 Iustin Pop
                                      iv_name=disk.iv_name)
3669 a8083063 Iustin Pop
            snap_disks.append(new_dev)
3670 a8083063 Iustin Pop
3671 a8083063 Iustin Pop
    finally:
3672 a8083063 Iustin Pop
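      # the instance was only shut down to get consistent snapshots; start
      # it again even if snapshotting failed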
      if self.op.shutdown:
3673 a8083063 Iustin Pop
        op = opcodes.OpStartupInstance(instance_name=instance.name,
3674 a8083063 Iustin Pop
                                       force=False)
3675 a8083063 Iustin Pop
        self.processor.ChainOpCode(op, feedback_fn)
3676 a8083063 Iustin Pop
3677 a8083063 Iustin Pop
    # TODO: check for size
3678 a8083063 Iustin Pop
3679 a8083063 Iustin Pop
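    # copy each snapshot to the target node, then remove the temporary
    # snapshot volume from the source node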
    for dev in snap_disks:
3680 a8083063 Iustin Pop
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
3681 a8083063 Iustin Pop
                                           instance):
3682 a8083063 Iustin Pop
        logger.Error("could not export block device %s from node"
3683 a8083063 Iustin Pop
                     " %s to node %s" %
3684 a8083063 Iustin Pop
                     (dev.logical_id[1], src_node, dst_node.name))
3685 a8083063 Iustin Pop
      if not rpc.call_blockdev_remove(src_node, dev):
3686 a8083063 Iustin Pop
        logger.Error("could not remove snapshot block device %s from"
3687 a8083063 Iustin Pop
                     " node %s" % (dev.logical_id[1], src_node))
3688 a8083063 Iustin Pop
3689 a8083063 Iustin Pop
    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
3690 a8083063 Iustin Pop
      logger.Error("could not finalize export for instance %s on node %s" %
3691 a8083063 Iustin Pop
                   (instance.name, dst_node.name))
3692 a8083063 Iustin Pop
3693 a8083063 Iustin Pop
    nodelist = self.cfg.GetNodeList()
3694 a8083063 Iustin Pop
    nodelist.remove(dst_node.name)
3695 a8083063 Iustin Pop
3696 a8083063 Iustin Pop
    # on one-node clusters nodelist will be empty after the removal
3697 a8083063 Iustin Pop
    # if we proceed the backup would be removed because OpQueryExports
3698 a8083063 Iustin Pop
    # substitutes an empty list with the full cluster node list.
3699 a8083063 Iustin Pop
    if nodelist:
3700 a8083063 Iustin Pop
      op = opcodes.OpQueryExports(nodes=nodelist)
3701 a8083063 Iustin Pop
      exportlist = self.processor.ChainOpCode(op, feedback_fn)
3702 a8083063 Iustin Pop
      for node in exportlist:
3703 a8083063 Iustin Pop
        if instance.name in exportlist[node]:
3704 a8083063 Iustin Pop
          if not rpc.call_export_remove(node, instance.name):
3705 a8083063 Iustin Pop
            logger.Error("could not remove older export for instance %s"
3706 a8083063 Iustin Pop
                         " on node %s" % (instance.name, node))
3707 5c947f38 Iustin Pop
3708 5c947f38 Iustin Pop
3709 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
3710 5c947f38 Iustin Pop
  """Generic tags LU.
3711 5c947f38 Iustin Pop

3712 5c947f38 Iustin Pop
  This is an abstract class which is the parent of all the other tags LUs.
3713 5c947f38 Iustin Pop

3714 5c947f38 Iustin Pop
  """
3715 5c947f38 Iustin Pop
  def CheckPrereq(self):
3716 5c947f38 Iustin Pop
    """Check prerequisites.
3717 5c947f38 Iustin Pop

3718 5c947f38 Iustin Pop
    """
3719 5c947f38 Iustin Pop
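    # resolve self.target to the object being tagged (cluster, node or
    # instance) and canonicalize self.op.name where applicable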
    if self.op.kind == constants.TAG_CLUSTER:
3720 5c947f38 Iustin Pop
      self.target = self.cfg.GetClusterInfo()
3721 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_NODE:
3722 5c947f38 Iustin Pop
      name = self.cfg.ExpandNodeName(self.op.name)
3723 5c947f38 Iustin Pop
      if name is None:
3724 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid node name (%s)" %
3725 3ecf6786 Iustin Pop
                                   (self.op.name,))
3726 5c947f38 Iustin Pop
      self.op.name = name
3727 5c947f38 Iustin Pop
      self.target = self.cfg.GetNodeInfo(name)
3728 5c947f38 Iustin Pop
    elif self.op.kind == constants.TAG_INSTANCE:
3729 5c947f38 Iustin Pop
      name = self.cfg.ExpandInstanceName(self.op.name)
3730 5c947f38 Iustin Pop
      if name is None:
3731 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid instance name (%s)" %
3732 3ecf6786 Iustin Pop
                                   (self.op.name,))
3733 5c947f38 Iustin Pop
      self.op.name = name
3734 5c947f38 Iustin Pop
      self.target = self.cfg.GetInstanceInfo(name)
3735 5c947f38 Iustin Pop
    else:
3736 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
3737 3ecf6786 Iustin Pop
                                 str(self.op.kind))
3738 5c947f38 Iustin Pop
3739 5c947f38 Iustin Pop
3740 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
3741 5c947f38 Iustin Pop
  """Returns the tags of a given object.
3742 5c947f38 Iustin Pop

3743 5c947f38 Iustin Pop
  """
3744 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name"]
3745 5c947f38 Iustin Pop
3746 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3747 5c947f38 Iustin Pop
    """Returns the tag list.
3748 5c947f38 Iustin Pop

3749 5c947f38 Iustin Pop
    """
3750 5c947f38 Iustin Pop
    return self.target.GetTags()
3751 5c947f38 Iustin Pop
3752 5c947f38 Iustin Pop
3753 5c947f38 Iustin Pop
class LUAddTag(TagsLU):
3754 5c947f38 Iustin Pop
  """Sets a tag on a given object.
3755 5c947f38 Iustin Pop

3756 5c947f38 Iustin Pop
  """
3757 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name", "tag"]
3758 5c947f38 Iustin Pop
3759 5c947f38 Iustin Pop
  def CheckPrereq(self):
3760 5c947f38 Iustin Pop
    """Check prerequisites.
3761 5c947f38 Iustin Pop

3762 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
3763 5c947f38 Iustin Pop

3764 5c947f38 Iustin Pop
    """
3765 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3766 5c947f38 Iustin Pop
    objects.TaggableObject.ValidateTag(self.op.tag)
3767 5c947f38 Iustin Pop
3768 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3769 5c947f38 Iustin Pop
    """Sets the tag.
3770 5c947f38 Iustin Pop

3771 5c947f38 Iustin Pop
    """
3772 5c947f38 Iustin Pop
    try:
3773 5c947f38 Iustin Pop
      self.target.AddTag(self.op.tag)
3774 5c947f38 Iustin Pop
    except errors.TagError, err:
3775 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
3776 5c947f38 Iustin Pop
    try:
3777 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3778 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3779 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3780 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3781 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
3782 5c947f38 Iustin Pop
3783 5c947f38 Iustin Pop
3784 5c947f38 Iustin Pop
class LUDelTag(TagsLU):
3785 5c947f38 Iustin Pop
  """Delete a tag from a given object.
3786 5c947f38 Iustin Pop

3787 5c947f38 Iustin Pop
  """
3788 5c947f38 Iustin Pop
  _OP_REQP = ["kind", "name", "tag"]
3789 5c947f38 Iustin Pop
3790 5c947f38 Iustin Pop
  def CheckPrereq(self):
3791 5c947f38 Iustin Pop
    """Check prerequisites.
3792 5c947f38 Iustin Pop

3793 5c947f38 Iustin Pop
    This checks that we have the given tag.
3794 5c947f38 Iustin Pop

3795 5c947f38 Iustin Pop
    """
3796 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
3797 5c947f38 Iustin Pop
    objects.TaggableObject.ValidateTag(self.op.tag)
3798 5c947f38 Iustin Pop
    if self.op.tag not in self.target.GetTags():
3799 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("Tag not found")
3800 5c947f38 Iustin Pop
3801 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
3802 5c947f38 Iustin Pop
    """Remove the tag from the object.
3803 5c947f38 Iustin Pop

3804 5c947f38 Iustin Pop
    """
3805 5c947f38 Iustin Pop
    self.target.RemoveTag(self.op.tag)
3806 5c947f38 Iustin Pop
    try:
3807 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
3808 5c947f38 Iustin Pop
    except errors.ConfigurationError:
3809 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
3810 3ecf6786 Iustin Pop
                                " config file and the operation has been"
3811 3ecf6786 Iustin Pop
                                " aborted. Please retry.")