Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 73415719

History | View | Annotate | Download (138.8 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import socket
30 a8083063 Iustin Pop
import time
31 a8083063 Iustin Pop
import tempfile
32 a8083063 Iustin Pop
import re
33 a8083063 Iustin Pop
import platform
34 a8083063 Iustin Pop
35 a8083063 Iustin Pop
from ganeti import rpc
36 a8083063 Iustin Pop
from ganeti import ssh
37 a8083063 Iustin Pop
from ganeti import logger
38 a8083063 Iustin Pop
from ganeti import utils
39 a8083063 Iustin Pop
from ganeti import errors
40 a8083063 Iustin Pop
from ganeti import hypervisor
41 a8083063 Iustin Pop
from ganeti import config
42 a8083063 Iustin Pop
from ganeti import constants
43 a8083063 Iustin Pop
from ganeti import objects
44 a8083063 Iustin Pop
from ganeti import opcodes
45 a8083063 Iustin Pop
from ganeti import ssconf
46 a8083063 Iustin Pop
47 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  HPATH = None
  HTYPE = None
  _OP_REQP = []
  REQ_CLUSTER = True
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    Checks that all parameters listed in _OP_REQP are present on the
    opcode and, depending on REQ_CLUSTER/REQ_MASTER, that the cluster
    exists and that we are running on its master node. Derived classes
    override this when they need extra validation.

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore

    # Every required opcode attribute must be present and not None
    for required in self._OP_REQP:
      if getattr(op, required, None) is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   required)

    if not self.REQ_CLUSTER:
      return

    if not cfg.IsCluster():
      raise errors.OpPrereqError("Cluster not initialized yet,"
                                 " use 'gnt-cluster init' first.")
    if self.REQ_MASTER:
      master = sstore.GetMasterNode()
      # Only the master node may run cluster commands
      if utils.HostInfo().name != master:
        raise errors.OpPrereqError("Commands must be run on the master"
                                   " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-element tuple: a dict containing
    the environment used for running this LU's hook, the list of node
    names on which the hook runs before execution, and the list of
    node names on which it runs after execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
148 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Simple LU which runs no hooks.

  Intended as a parent for LogicalUnits that do not run any hooks, so
  that the empty-hooks boilerplate lives in one place instead of being
  duplicated in every such LU.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Build hooks env.

    Returns an empty environment and empty pre/post node lists, since
    this LU runs no hooks at all.

    """
    empty_env = {}
    return empty_env, [], []
165 a8083063 Iustin Pop
166 a8083063 Iustin Pop
167 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Returns list of checked and expanded node names.

  Args:
    nodes: List of nodes (strings) or None for all

  Returns:
    Sorted list of fully-expanded node names; raises OpPrereqError if
    a name cannot be expanded.

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  # An empty list means "all nodes in the cluster"
  if not nodes:
    return utils.NiceSort(lu.cfg.GetNodeList())

  expanded = []
  for node_name in nodes:
    full_name = lu.cfg.ExpandNodeName(node_name)
    if full_name is None:
      raise errors.OpPrereqError("No such node name '%s'" % node_name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
189 3312b702 Iustin Pop
190 3312b702 Iustin Pop
191 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Returns list of checked and expanded instance names.

  Args:
    instances: List of instances (strings) or None for all

  Returns:
    Sorted list of fully-expanded instance names; raises OpPrereqError
    if a name cannot be expanded.

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  # An empty list means "all instances in the cluster"
  if not instances:
    return utils.NiceSort(lu.cfg.GetInstanceList())

  expanded = []
  for inst_name in instances:
    full_name = lu.cfg.ExpandInstanceName(inst_name)
    if full_name is None:
      raise errors.OpPrereqError("No such instance name '%s'" % inst_name)
    expanded.append(full_name)
  return utils.NiceSort(expanded)
213 dcb93971 Michael Hanselmann
214 dcb93971 Michael Hanselmann
215 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
216 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
217 83120a01 Michael Hanselmann

218 83120a01 Michael Hanselmann
  Args:
219 83120a01 Michael Hanselmann
    static: Static fields
220 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
221 83120a01 Michael Hanselmann

222 83120a01 Michael Hanselmann
  """
223 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
224 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
225 dcb93971 Michael Hanselmann
226 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
227 dcb93971 Michael Hanselmann
228 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
229 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
230 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
231 3ecf6786 Iustin Pop
                                          difference(all_fields)))
232 dcb93971 Michael Hanselmann
233 dcb93971 Michael Hanselmann
234 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
235 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
236 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
237 ecb215b5 Michael Hanselmann

238 ecb215b5 Michael Hanselmann
  Args:
239 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
240 396e1b78 Michael Hanselmann
  """
241 396e1b78 Michael Hanselmann
  env = {
242 0e137c28 Iustin Pop
    "OP_TARGET": name,
243 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
244 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
245 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
246 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
247 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
248 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
249 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
250 396e1b78 Michael Hanselmann
  }
251 396e1b78 Michael Hanselmann
252 396e1b78 Michael Hanselmann
  if nics:
253 396e1b78 Michael Hanselmann
    nic_count = len(nics)
254 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
255 396e1b78 Michael Hanselmann
      if ip is None:
256 396e1b78 Michael Hanselmann
        ip = ""
257 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
258 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
259 396e1b78 Michael Hanselmann
  else:
260 396e1b78 Michael Hanselmann
    nic_count = 0
261 396e1b78 Michael Hanselmann
262 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
263 396e1b78 Michael Hanselmann
264 396e1b78 Michael Hanselmann
  return env
265 396e1b78 Michael Hanselmann
266 396e1b78 Michael Hanselmann
267 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    dict of environment variables, as built by _BuildInstanceHookEnv

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bug fix: this previously passed instance.os (copy-paste from the
    # line above), so INSTANCE_STATUS carried the OS name instead of
    # the instance's run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
287 396e1b78 Michael Hanselmann
288 396e1b78 Michael Hanselmann
289 a8083063 Iustin Pop
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Lines mentioning only some of (ip, fullnode, short name) are treated
  as stale and dropped; a complete line is appended if no fully
  correct one exists.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  # Short host name: everything before the first dot
  node = fullnode.split(".", 1)[0]

  f = open('/etc/hosts', 'r+')

  inthere = False      # a complete, correct entry already exists

  save_lines = []      # lines to keep as-is
  add_lines = []       # new lines to append
  removed = False      # at least one stale line was dropped

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    # Work on the line without its trailing newline
    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    # Determine how many of (ip, fullnode, node) this line mentions
    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      # Complete entry found; keep it, no need to add a new one
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    # Unrelated line, keep unchanged
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    # NOTE(review): mkstemp creates the temp file with mode 0600, so
    # after the rename /etc/hosts may lose its usual world-readable
    # permissions -- confirm whether a chmod is needed here.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)  # seek to end-of-file before appending
    for add in add_lines:
      f.write(add)

  f.close()
367 a8083063 Iustin Pop
368 a8083063 Iustin Pop
369 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Entries matching (ip, fullnode) with a stale key, or only partially
  matching, are dropped; a fresh "host,ip ssh-rsa key" line is added
  if no fully correct entry exists.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open read-write if the file exists, otherwise create it
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  inthere = False      # a correct entry with the right key exists

  save_lines = []      # lines to keep as-is
  add_lines = []       # new lines to append
  removed = False      # at least one stale line was dropped

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      # known_hosts format: "name1,name2,... keytype key"; here the
      # key is expected in field index 2
      fields = parts[0].split(',')
      key = parts[2]

      # Determine how many of (ip, fullnode) this entry's names cover
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # Fully correct entry; keep it and skip adding a new one
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # Partial match or stale key: drop the line
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    # Comments, short lines and unrelated entries are kept unchanged
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)  # seek to end-of-file before appending
    for add in add_lines:
      f.write(add)

  f.close()
446 a8083063 Iustin Pop
447 a8083063 Iustin Pop
448 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
449 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
450 a8083063 Iustin Pop

451 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
452 a8083063 Iustin Pop
  is the error message.
453 a8083063 Iustin Pop

454 a8083063 Iustin Pop
  """
455 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
456 a8083063 Iustin Pop
  if vgsize is None:
457 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
458 a8083063 Iustin Pop
  elif vgsize < 20480:
459 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
460 191a8385 Guido Trotter
            (vgname, vgsize))
461 a8083063 Iustin Pop
  return None
462 a8083063 Iustin Pop
463 a8083063 Iustin Pop
464 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn (currently unused by the
      function body)

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Back up and remove any pre-existing key files before regenerating
  for key_file in (priv_key, pub_key):
    if os.path.exists(key_file):
      utils.CreateBackup(key_file)
    utils.RemoveFile(key_file)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  # Authorize the freshly generated public key for the cluster user
  pub_fd = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, pub_fd.read(8192))
  finally:
    pub_fd.close()
494 a8083063 Iustin Pop
495 a8083063 Iustin Pop
496 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the ssconf store object used to persist the node password

  Raises:
    errors.OpExecError: if certificate generation or the node daemon
      restart fails

  """
  # Create pseudo random password
  # NOTE(review): the 'sha' module is deprecated in modern Python;
  # hashlib.sha1 is the drop-in replacement when this file is updated.
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Generate a self-signed certificate valid for 5 years; key and cert
  # are written into the same file
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # The file also contains the private key: owner read-only
  os.chmod(constants.SSL_CERT_FILE, 0400)

  # Restart the node daemon so it picks up the new password/certificate
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
525 a8083063 Iustin Pop
526 a8083063 Iustin Pop
527 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the bridges needed by an instance exist.

  Raises OpPrereqError if any bridge used by the instance's NICs is
  missing on its primary node.

  """
  # Collect the bridge of every NIC and verify them on the primary node
  required_bridges = [nic.bridge for nic in instance.nics]
  bridges_ok = rpc.call_bridges_exist(instance.primary_node, required_bridges)
  if not bridges_ok:
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (required_bridges, instance.primary_node))
537 bf6929a2 Alexander Schreiber
538 bf6929a2 Alexander Schreiber
539 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Turns the local machine into the (single-node) master of a new
  cluster: validates the names/IPs involved, writes the simple store,
  generates the node daemon certificate, starts the master IP, sets up
  SSH and /etc/hosts, and writes the initial cluster configuration.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  # required opcode parameters
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  # no cluster exists yet, so don't require one
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Also validates the secondary IP, volume group, MAC prefix,
    hypervisor type and master network device from the opcode.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    # resolve our own name/IP; kept on self for BuildHooksEnv and Exec
    self.hostname = hostname = utils.HostInfo()

    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # the resolved IP must actually belong to this host: check that the
    # node daemon port on it is reachable from localhost
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    # a secondary IP, if given and different from the primary, must
    # also belong to this host (same reachability check as above)
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                 "but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    # MAC prefix must be three lowercase hex octets, e.g. "aa:00:00"
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    # the master netdev must exist on this host
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # host key line is "<type> <key> ..."; keep only the key material
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname.name, hostname.ip)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
655 a8083063 Iustin Pop
656 a8083063 Iustin Pop
657 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_name = self.sstore.GetMasterNode()

    # the only node left must be the master itself
    remaining_nodes = self.cfg.GetNodeList()
    if len(remaining_nodes) != 1 or remaining_nodes[0] != master_name:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(remaining_nodes) - 1))

    # and no instances may remain
    remaining_instances = self.cfg.GetInstanceList()
    if remaining_instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(remaining_instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    Backs up the cluster SSH key pair, then tells the master node to
    leave the cluster.

    """
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    # back up the private key first, then the public one
    for keyfile in (priv_key, pub_key):
      utils.CreateBackup(keyfile)
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
690 a8083063 Iustin Pop
691 a8083063 Iustin Pop
692 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  Checks every node (protocol version, volume groups, file checksums,
  inter-node connectivity, hypervisor), every instance (volumes present
  on its node, running only on its primary) and finally reports orphan
  volumes and orphan instances.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums
      vglist: volume group data returned by the node
      node_result: results of the node verify rpc call
      remote_version: protocol version reported by the node
      feedback_fn: function used to report problems

    Returns:
      True if any problem was detected, False otherwise.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # FIX: use the same two-space message prefix as every other error
      feedback_fn("  - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # FIX: the loop variable used to be 'node', shadowing the
        # method parameter of the same name; use a dedicated name
        for failed_node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (failed_node, node_result['nodelist'][failed_node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node, and that the instance runs only
    on its primary node.

    Returns:
      True if any problem was detected, False otherwise.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if instance not in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    # every logical volume the instance needs must be visible on the
    # corresponding node
    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # an instance not marked down must be running on its primary node
    if instanceconfig.status != 'down':
      if instance not in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # ... and must not be running anywhere else
    for node in node_instance:
      if node != node_current:
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns:
      True if any orphan volume was found, False otherwise.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns:
      True if any orphan instance was found, False otherwise.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Returns:
      0 if no problem was found, 1 otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    master = self.sstore.GetMasterNode()  # NOTE(review): currently unused
    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    # gather all remote data up front, then verify node by node
    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result = self._VerifyInstance(instance, node_volume, node_instance,
                                    feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      # accumulate the expected volumes of all instances for the
      # orphan-volume check below
      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
934 a8083063 Iustin Pop
935 a8083063 Iustin Pop
936 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    env = {
      # FIX: the simple store is an attribute of the LU, not of the
      # opcode; self.op.sstore raised AttributeError
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    The new name must resolve, differ from the current name/IP pair,
    and (if the IP changes) the new IP must not be in use already.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # fping exits successfully if the address answers, which means
      # the new master IP is already taken
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master IP, rewrites the relevant simple-store keys,
    distributes them to all other nodes and restarts the master IP.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          if not result[to_node]:
            # distribution failures are logged but don't abort the rename
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to bring the master IP back, even on failure
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,\n"
                     "please restart manually.")
1013 07bd8a51 Iustin Pop
1014 07bd8a51 Iustin Pop
1015 a8083063 Iustin Pop
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: the configuration object, used to set the disk IDs
    instance: the instance whose disks are polled
    oneshot: if True, report the status once instead of looping
    unlock: if True, release the 'cmd' lock while sleeping

  Returns:
    True if no disk ended up degraded, False otherwise.

  """
  if not instance.disks:
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      perc_done, est_time, is_degraded = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          # FIX: keep the maximum estimate over all devices; the old
          # code overwrote max_time with whatever device came last
          max_time = max(max_time, est_time)
        else:
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    if unlock:
      utils.Unlock('cmd')
    try:
      # NOTE: if no device reported an estimate this sleeps 0 seconds
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1076 a8083063 Iustin Pop
1077 a8083063 Iustin Pop
1078 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
  """Check that mirrors are not degraded.

  Recursively checks the device and all its children on the given
  node; returns True when everything reported healthy, False when any
  device is degraded or could not be queried.

  """
  cfgw.SetDiskID(dev, node)

  healthy = True
  if on_primary or dev.AssembleOnSecondary():
    rstats = rpc.call_blockdev_find(node, dev)
    if rstats:
      # rstats[5] carries the degraded flag of the remote device
      healthy = healthy and not rstats[5]
    else:
      logger.ToStderr("Can't get any data from node %s" % node)
      healthy = False
  if dev.children:
    for child in dev.children:
      # 'and' short-circuits: once unhealthy, skip further remote calls
      healthy = healthy and _CheckDiskConsistency(cfgw, child, node,
                                                  on_primary)

  return healthy
1097 a8083063 Iustin Pop
1098 a8083063 Iustin Pop
1099 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    return

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    Returns:
      The result of the os_diagnose rpc call over all nodes.

    Raises:
      OpExecError: if the rpc call signalled failure.

    """
    node_list = self.cfg.GetNodeList()
    node_data = rpc.call_os_diagnose(node_list)
    # FIX: the rpc layer signals failure with the False sentinel; use
    # an identity test instead of '==', which would also match other
    # values that compare equal to False (e.g. 0)
    if node_data is False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return node_data
1122 a8083063 Iustin Pop
1123 a8083063 Iustin Pop
1124 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allows itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    # run the hooks on all nodes except the one being removed
    all_nodes = self.cfg.GetNodeList()
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # modern call-style raise, consistent with the other checks below
      # (was the legacy Python 2 comma form)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still uses the node
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # store the fully-expanded node name and object for Exec
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    # stop the node daemon on the target via ssh
    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Verify that the requested output fields are valid.

    """
    # fields that require a live query against the nodes
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Compute and return the requested data for each node.

    """
    nodenames = self.wanted
    nodelist = [self.cfg.GetNodeInfo(name) for name in nodenames]

    # begin data gathering

    # runtime data is fetched only when some dynamic field was requested
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_node_info(nodenames, self.cfg.GetVGName())
      for name in nodenames:
        nodeinfo = node_data.get(name, None)
        if not nodeinfo:
          live_data[name] = {}
        else:
          live_data[name] = {
            "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
            "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
            "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
            "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
            "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
            "bootid": nodeinfo['bootid'],
            }
    else:
      live_data = dict.fromkeys(nodenames, {})

    # per-node sets of instances using it as primary resp. secondary node
    node_to_primary = {}
    node_to_secondary = {}
    for name in nodenames:
      node_to_primary[name] = set()
      node_to_secondary[name] = set()

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # end data gathering

    output = []
    for node in nodelist:
      row = []
      for field in self.op.output_fields:
        if field == "name":
          val = node.name
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Verify that the requested output fields are valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)


  def Exec(self, feedback_fn):
    """Compute the requested fields for every volume on every node.

    """
    nodenames = self.nodes
    volumes = rpc.call_node_volumes(nodenames)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]

    # map each instance to its per-node dict of logical volumes
    lv_by_node = dict([(inst, inst.MapLVsByNode()) for inst in instances])

    output = []
    for node in nodenames:
      node_vols = volumes.get(node, None)
      # skip nodes that did not answer or have no volumes
      if not node_vols:
        continue

      vol_list = node_vols[:]
      vol_list.sort(key=lambda vol: vol['dev'])

      for vol in vol_list:
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            # find the instance owning this LV, '-' if unowned
            for inst in instances:
              if node not in lv_by_node[inst]:
                continue
              if vol['name'] in lv_by_node[inst][node]:
                val = inst.name
                break
            else:
              val = '-'
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    # pre-hooks run on the current nodes, post-hooks also on the new one
    nodes_0 = self.cfg.GetNodeList()
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    # resolve the name; HostInfo raises on resolver failure
    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip is None:
      # single-homed node: the secondary ip defaults to the primary
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # make sure neither ip collides with any already-configured node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError(
          "Node secondary ip not reachable by TCP based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is embedded in a shell command below, so restrict it
    # to a safe character set
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # give the freshly-restarted daemon a few seconds to come up
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host keys plus the ganeti user keypair, read in this fixed order
    # (call_node_add below expects them positionally)
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    if new_node.secondary_ip != new_node.primary_ip:
      # dual-homed node: verify it can actually be reached on the
      # secondary ip it claims to have
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the"
                                 " secondary ip you gave (%s).\n"
                                 "Please fix and re-run this command." %
                                 new_node.secondary_ip)

    # cross-check the node's own idea of its hostname against the resolver
    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s.\n"
                               "Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      # copy failures are logged but do not abort the operation
      for to_node in dist_nodes:
        if not result[to_node]:
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    # push the simple-store files to the new node as well
    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  # must be runnable on a node that is not (yet) the master
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    # the local host becomes the new master
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be.\n"
                                 "%s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    # best effort: if the old master can't be demoted, log and continue
    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # record the new master in the simple store and distribute the
    # updated file to all nodes
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return cluster config.

    The returned dictionary holds the cluster name, the various
    software/protocol version constants, the master node name and the
    platform architecture of this host.

    """
    cluster_info = {}
    cluster_info["name"] = self.sstore.GetClusterName()
    cluster_info["software_version"] = constants.RELEASE_VERSION
    cluster_info["protocol_version"] = constants.PROTOCOL_VERSION
    cluster_info["config_version"] = constants.CONFIG_VERSION
    cluster_info["os_api_version"] = constants.OS_API_VERSION
    cluster_info["export_version"] = constants.EXPORT_VERSION
    cluster_info["master"] = self.sstore.GetMasterNode()
    cluster_info["architecture"] = (platform.architecture()[0],
                                    platform.machine())
    return cluster_info
1685 a8083063 Iustin Pop
1686 a8083063 Iustin Pop
1687 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    filename = self.op.filename
    if not os.path.exists(filename):
      raise errors.OpPrereqError("No such filename '%s'" % filename)
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from the master to some nodes.

    The file named in the opcode is pushed via ssh to every node in
    self.nodes except the local host itself; copy failures are logged
    but do not abort the operation.

    """
    filename = self.op.filename
    local_name = utils.HostInfo().name
    # no point in copying the file onto ourselves
    remote_nodes = [node for node in self.nodes if node != local_name]
    for node in remote_nodes:
      if not ssh.CopyFileToNode(node, filename):
        logger.Error("Copy of file %s to node %s failed" % (filename, node))
1724 a8083063 Iustin Pop
1725 a8083063 Iustin Pop
1726 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Dump a text representation of the cluster configuration.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """No prerequisites are needed for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the serialized form of the cluster configuration.

    """
    return self.cfg.DumpConfig()
1743 a8083063 Iustin Pop
1744 a8083063 Iustin Pop
1745 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run the command on the selected nodes via ssh.

    Returns a list of (node, output, exit_code) tuples, one per node.

    """
    command = self.op.command
    output = []
    for node in self.nodes:
      ssh_result = ssh.SSHCall(node, "root", command)
      output.append((node, ssh_result.output, ssh_result.exit_code))
    return output
1769 a8083063 Iustin Pop
1770 a8083063 Iustin Pop
1771 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance


  def Exec(self, feedback_fn):
    """Activate the disks.

    Assembles the instance's block devices on all nodes and returns the
    node/device mapping; raises OpExecError on assembly failure.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if disks_ok:
      return disks_info
    raise errors.OpExecError("Cannot activate block devices")
1800 a8083063 Iustin Pop
1801 a8083063 Iustin Pop
1802 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the configuration object, used to set the per-node disk IDs
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info): disks_ok is False if any required
    device failed to assemble; device_info is a list of
    (primary_node, instance_visible_name, primary_assemble_result)
    tuples, one per instance disk
  """
  device_info = []
  disks_ok = True
  for inst_disk in instance.disks:
    # assemble result on the primary node, saved for device_info
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk,
                                          instance.name, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s (is_pri"
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
        # a failure is fatal unless it is on a secondary node and
        # ignore_secondaries is set
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1843 a8083063 Iustin Pop
1844 a8083063 Iustin Pop
1845 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  Assembles all of the instance's block devices; if that fails, any
  partially-assembled devices are shut down again and OpExecError is
  raised.

  """
  assembled, _ = _AssembleInstanceDisks(instance, cfg,
                                        ignore_secondaries=force)
  if assembled:
    return
  # undo any partial assembly before failing
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1857 fe7b0351 Michael Hanselmann
1858 fe7b0351 Michael Hanselmann
1859 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    This refuses to run if the instance still appears in the list of
    running instances on its primary node, since deactivating the disks
    of a live instance would corrupt it.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list result means the RPC to the node failed
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1894 a8083063 Iustin Pop
1895 a8083063 Iustin Pop
1896 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node are ignored
  (i.e. they do not cause a False return value); errors on any other
  node always make the function return False.

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # only primary-node failures can be ignored, and only when
        # ignore_primary is set
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1915 a8083063 Iustin Pop
1916 a8083063 Iustin Pop
1917 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode fields that must be present
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master plus all nodes of the instance
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    # normalize the opcode name to the expanded (full) instance name
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    Raises OpExecError if the primary node cannot be contacted, does
    not have enough free memory, or fails to start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # query the primary node for its free memory before starting
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (node_current))

    freememory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > freememory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                freememory))

    _StartInstanceDisks(self.cfg, instance, force)

    # if the start fails, clean up the disks assembled above
    if not rpc.call_instance_start(node_current, instance, extra_args):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
1988 a8083063 Iustin Pop
1989 a8083063 Iustin Pop
1990 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode fields that must be present
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # hooks run on the master plus all nodes of the instance
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existance
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    # normalize the opcode name to the expanded (full) instance name
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the node daemon in a single
    RPC; a full reboot is implemented here as instance shutdown, disk
    deactivation/reactivation, and a fresh instance start.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # NOTE(review): this validation arguably belongs in CheckPrereq,
    # but moving it would change the exception type raised
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                           constants.INSTANCE_REBOOT_HARD,
                           constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: stop the instance, cycle its disks, start it again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2064 bf6929a2 Alexander Schreiber
2065 bf6929a2 Alexander Schreiber
2066 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    A failure to stop the instance itself is only logged; the instance
    is marked down and its disks are deactivated regardless.

    """
    instance = self.instance
    primary = instance.primary_node
    if not rpc.call_instance_shutdown(primary, instance):
      logger.Error("could not shutdown instance")

    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2109 a8083063 Iustin Pop
2110 a8083063 Iustin Pop
2111 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  # opcode fields that must be present
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node itself that the instance is not running
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # use instance.primary_node here: the reinstall opcode has no
        # 'pnode' field, so the old self.op.pnode reference would have
        # raised AttributeError instead of a clean prereq error
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
      if not isinstance(os_obj, objects.OS):
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Optionally switches the instance's OS, then re-runs the OS create
    scripts on the primary node with the disks assembled.

    """
    inst = self.instance

    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s "
                                 "on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always release the disks, even if the OS scripts failed
      _ShutdownInstanceDisks(inst, self.cfg)
2188 fe7b0351 Michael Hanselmann
2189 fe7b0351 Michael Hanselmann
2190 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the config may say "down" while the instance still runs; ask the node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    if not getattr(self.op, "ignore_ip", False):
      # a reachable IP means the new name's address is already in use
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))


  def Exec(self, feedback_fn):
    """Rename the instance.

    The rename is first recorded in the configuration, then the OS
    rename script is run on the primary node; a script failure is only
    logged, since Ganeti's view of the instance is already updated.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # FIX: message used to read "Could run OS rename script"
        msg = ("Could not run OS rename script for instance %s\n"
               "on node %s\n"
               "(but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2266 decd5f45 Iustin Pop
2267 decd5f45 Iustin Pop
2268 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Remove the instance.

    Shuts the instance down, removes its disks and finally drops it
    from the cluster configuration; with ignore_failures set, shutdown
    and disk-removal errors only produce warnings.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))
      feedback_fn("Warning: can't shutdown instance")

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if not self.op.ignore_failures:
        raise errors.OpExecError("Can't remove instance's disks")
      feedback_fn("Warning: can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2325 a8083063 Iustin Pop
2326 a8083063 Iustin Pop
2327 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields that require a live query against the nodes, as opposed to
    # the static fields read from the configuration
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # resolve op.names to the list of instances to query
    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of instances and their attributes.

    Returns a list of rows, one per instance, each row holding the
    values of self.op.output_fields in order.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    # only contact the nodes if a dynamic field was actually requested
    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # an exact False (as opposed to an empty dict, which means no
          # instance is running there) marks the node as unreachable
          bad_nodes.append(name)
        # else no instance is alive
    else:
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True when the instance is configured to be up
          val = (instance.status != "down")
        elif field == "oper_state":
          # None when the node could not be contacted, otherwise whether
          # the instance was actually found running
          if instance.primary_node in bad_nodes:
            val = None
          else:
            val = bool(live_data.get(instance.name))
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          # None = node unreachable, "-" = instance not running,
          # "?" = running but memory not reported
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is the disk's iv_name ("sda"/"sdb")
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2425 a8083063 Iustin Pop
2426 a8083063 Iustin Pop
2427 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a network
    mirrored disk template, and that the target (secondary) node has
    enough memory and the required bridges.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    # check memory requirements on the secondary node
    target_node = secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      # FIX: the message used to interpolate the whole nodeinfo dict
      # instead of the node name
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % target_node)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    # re-check memory on the target right before acting; the situation
    # may have changed since CheckPrereq ran
    feedback_fn("* checking target node resource availability")
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      # with ignore_consistency the failover proceeds even if the source
      # node cannot be reached (e.g. it is down)
      if self.op.ignore_consistency:
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      # clean up the disks we just activated before failing
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2562 a8083063 Iustin Pop
2563 a8083063 Iustin Pop
2564 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Create a tree of block devices on the primary node.

  This always creates all devices: children are created depth-first,
  then the device itself. Returns False as soon as any creation fails.

  """
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not result:
    return False
  # remember the backend id the first time the device is created
  if device.physical_id is None:
    device.physical_id = result
  return True
2583 a8083063 Iustin Pop
2584 a8083063 Iustin Pop
2585 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  if device.CreateOnSecondary():
    force = True
  for child in (device.children or []):
    created = _CreateBlockDevOnSecondary(cfg, node, instance,
                                         child, force, info)
    if not created:
      return False

  if not force:
    # nothing to create at this level
    return True
  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not result:
    return False
  # remember the backend id the first time the device is created
  if device.physical_id is None:
    device.physical_id = result
  return True
2612 a8083063 Iustin Pop
2613 a8083063 Iustin Pop
2614 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2615 923b1523 Iustin Pop
  """Generate a suitable LV name.
2616 923b1523 Iustin Pop

2617 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2618 923b1523 Iustin Pop

2619 923b1523 Iustin Pop
  """
2620 923b1523 Iustin Pop
  results = []
2621 923b1523 Iustin Pop
  for val in exts:
2622 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2623 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2624 923b1523 Iustin Pop
  return results
2625 923b1523 Iustin Pop
2626 923b1523 Iustin Pop
2627 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd device complete with its children.

  Builds a data LV of the requested size and a 128MB metadata LV
  (named after names[0]/names[1]), wrapped in a DRBD7 device on a
  freshly allocated port.

  """
  drbd_port = cfg.AllocatePort()
  vg = cfg.GetVGName()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vg, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_dev, meta_dev])
2641 a8083063 Iustin Pop
2642 a8083063 Iustin Pop
2643 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device complete with its children.

  Builds a data LV of the requested size and a 128MB metadata LV
  (named after names[0]/names[1]), wrapped in a DRBD8 device on a
  freshly allocated port and exported under the given iv_name.

  """
  drbd_port = cfg.AllocatePort()
  vg = cfg.GetVGName()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vg, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
2658 a1f445d3 Iustin Pop
2659 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Returns the list of top-level Disk objects for the instance: always
  an sda (data, disk_sz) and an sdb (swap, swap_sz) pair, except for
  the diskless template which returns an empty list.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  if template_name == "diskless":
    disks = []
  elif template_name == "plain":
    # plain LVs live only on the primary node
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == "local_raid1":
    # two LV mirrors per disk, joined by an MD RAID1, all on the primary
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")


    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    # MD RAID1 on top of a DRBD7 device mirrored to one secondary node
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    # plain DRBD8 devices mirrored to one secondary node, no MD layer
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2733 a8083063 Iustin Pop
2734 a8083063 Iustin Pop
2735 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2736 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2737 3ecf6786 Iustin Pop

2738 3ecf6786 Iustin Pop
  """
2739 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2740 a0c3fea1 Michael Hanselmann
2741 a0c3fea1 Michael Hanselmann
2742 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance. For every disk, the
  block device is first created on each secondary node and only then
  on the primary node, so the mirror targets exist before the primary
  device is assembled.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(cfg, snode, instance,
                                           disk, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                       instance, disk, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False
  return True
2773 a8083063 Iustin Pop
2774 a8083063 Iustin Pop
2775 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones (compare
  with `_CreateDisks()`).

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the removal process

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for top_disk in instance.disks:
    # walk the whole device tree, covering primary and secondary nodes
    for node, disk in top_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      removed = rpc.call_blockdev_remove(node, disk)
      if not removed:
        # best-effort: record the failure but keep removing the rest
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (top_disk.iv_name, node))
        all_removed = False
  return all_removed
2802 a8083063 Iustin Pop
2803 a8083063 Iustin Pop
2804 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Handles both fresh creation (INSTANCE_CREATE) and import from an
  export directory (INSTANCE_IMPORT).

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    # import-specific variables are only meaningful in import mode
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl


  def CheckPrereq(self):
    """Check prerequisites.

    Validates the creation mode, the source export (in import mode),
    the primary/secondary nodes, free disk space, the OS, the instance
    name/IP and the target bridge. Raises OpPrereqError on any failed
    check.

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # only single-disk exports can be imported at the moment
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" % self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        # FIX: the original interpolated the whole 'nodeinfo' dict here
        # instead of the offending node's name
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get([pnode.name], self.op.os_type)[pnode.name]
    if not isinstance(os_obj, objects.OS):
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node" % self.op.os_type)

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # ip handling: None/"none" means no IP, "auto" resolves the name
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means the address is already in use somewhere
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Creates the disks, registers the instance in the configuration,
    waits for disk sync, installs/imports the OS and optionally starts
    the instance. Raises OpExecError on failure (after rolling back
    disk creation where possible).

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded mirrors: remove disks and unregister the instance
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3086 a8083063 Iustin Pop
3087 a8083063 Iustin Pop
3088 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    # ask the primary node which instances it is currently running
    running = rpc.call_instance_list([node])[node]
    if running is False:
      raise errors.OpExecError("Can't connect to node %s." % node)

    if instance.name not in running:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name, node))

    console_cmd = hypervisor.GetHypervisor().GetShellCommandForConsole(
      instance.name)
    # build ssh cmdline
    argv = (["ssh", "-q", "-t"] +
            list(ssh.KNOWN_HOSTS_OPTS) +
            list(ssh.BATCH_MODE_OPTS) +
            [node, console_cmd])
    return "ssh", argv
3136 a8083063 Iustin Pop
3137 a8083063 Iustin Pop
3138 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the remote
    node is known and is not the primary, that the instance uses the
    remote_raid1 disk template, that the named disk exists and that it
    does not already have two mirror children.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the disk by its iv_name; the for/else fires when no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave"
                                 " devices.\n"
                                 "This would create a 3-disk raid1"
                                 " which we don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component.

    Creates the new DRBD device on the secondary node first, then on
    the primary, then attaches it as a child of the md device. On
    failure, already-created devices are removed again (best effort).

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    # generate unique LV names for the data and metadata volumes
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # roll back on both nodes; failures here are only logged since we
      # are already on an error path
      logger.Error("Can't add mirror compoment to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    disk.children.append(new_drbd)

    # persist the updated disk tree in the cluster configuration
    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance)

    return 0
3251 a8083063 Iustin Pop
3252 a8083063 Iustin Pop
3253 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove a component from a remote_raid1 disk.

  Detaches one DRBD child from an instance's MD mirror and then removes
  the child's block devices from both of its nodes.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  # disk_name selects the instance disk; disk_id is the DRBD port of the
  # child to remove (matched against logical_id[2] in CheckPrereq)
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK_NAME": self.op.disk_name,
      "DISK_ID": self.op.disk_id,
      "OLD_SECONDARY": self.old_secondary,  # computed in CheckPrereq
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # locate the instance disk by iv_name; the for/else raises only when
    # the loop finishes without a 'break' (i.e. no match found)
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    # locate the DRBD child whose port (logical_id[2]) matches disk_id
    for child in disk.children:
      if (child.dev_type == constants.LD_DRBD7 and
          child.logical_id[2] == self.op.disk_id):
        break
    else:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = disk
    self.child = child
    # logical_id holds (node_a, node_b, port); pick whichever end is not
    # the primary node as the old secondary
    if self.child.logical_id[0] == instance.primary_node:
      oid = 1
    else:
      oid = 0
    self.old_secondary = self.child.logical_id[oid]

  def Exec(self, feedback_fn):
    """Remove the mirror component

    """
    instance = self.instance
    disk = self.disk
    child = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(disk, instance.primary_node)
    # first detach the child from the MD array on the primary; this is
    # the only hard-failure point of the operation
    if not rpc.call_blockdev_removechildren(instance.primary_node,
                                            disk, [child]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # best-effort removal of the child's devices on both of its nodes
    # (logical_id[:2] are the two node names)
    for node in child.logical_id[:2]:
      self.cfg.SetDiskID(child, node)
      if not rpc.call_blockdev_remove(node, child):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node)

    disk.children.remove(child)
    self.cfg.AddInstance(instance)  # persist the updated disk layout
3338 a8083063 Iustin Pop
3339 a8083063 Iustin Pop
3340 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  Supports full replacement for remote_raid1 instances and per-node
  (primary or secondary) replacement for drbd8 instances; dispatching
  happens in Exec based on the disk template and the remote_node option.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and validates the
    mode/remote_node combination for the instance's disk template.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd8' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd8' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        self.tgt_node = instance.primary_node
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # normalized: None means "keep the current secondary"
    self.op.remote_node = remote_node

  def _ExecRR1(self, feedback_fn):
    """Replace the disks of an instance.

    remote_raid1 path: for every disk, build a new DRBD pair towards
    remote_node, attach it to the MD mirror, wait for sync, then drop the
    old child.

    """
    instance = self.instance
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on"
                                 " secondary node %s\n"
                                 "Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        logger.Error("Can't add mirror compoment to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything is in sync: drop the old children, best-effort
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)

  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for drbd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.
    """
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    for dev in instance.disks:
      if not dev.iv_name in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      logger.Info("adding new local storage on %s for %s" %
                  (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      dev.children = []
      cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption than logical_id == physical_id (which in
      # turn is the unique_id on that node)
      temp_suffix = int(time.time())
      logger.Info("renaming the old LVs on the target node")
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      rlist = [(disk, ren_fn(disk, temp_suffix)) for disk in old_lvs]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        logger.Error("Can't rename old LVs on node %s" % tgt_node)
        do_change_old = False
      else:
        do_change_old = True
      # now we rename the new LVs to the old LVs
      logger.Info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        logger.Error("Can't rename new LVs on node %s" % tgt_node)
      else:
        for old, new in zip(old_lvs, new_lvs):
          new.logical_id = old.logical_id
          cfg.SetDiskID(new, tgt_node)

      if do_change_old:
        for disk in old_lvs:
          disk.logical_id = ren_fn(disk, temp_suffix)
          cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      logger.Info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        logger.Error("Can't add local storage to drbd!")
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # FIX: the format string was missing its argument and
            # previously logged a literal '%s'
            logger.Error("Can't rollback device %s" % new_lv)
        return

      dev.children = new_lvs
      cfg.Update(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    logger.Info("Done changing drbd configs, waiting for sync")
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      logger.Info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        if not rpc.call_blockdev_remove(tgt_node, lv):
          logger.Error("Can't cleanup child device, skipping. You need to"
                       " fix manually!")
          continue

  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.
    """
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    for dev in instance.disks:
      size = dev.size
      logger.Info("adding new local storage on %s for %s" %
                  (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                      _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

      # we have new devices, shutdown the drbd on the old secondary
      cfg.SetDiskID(dev, old_node)
      if not rpc.call_blockdev_shutdown(old_node, dev):
        raise errors.OpExecError("Failed to shutdown DRBD on old node")

      # we have new storage, we 'rename' the network on the primary
      cfg.SetDiskID(dev, pri_node)
      # rename to the ip of the new node
      new_uid = list(dev.physical_id)
      new_uid[2] = self.remote_node_info.secondary_ip
      rlist = [(dev, tuple(new_uid))]
      if not rpc.call_blockdev_rename(pri_node, rlist):
        raise errors.OpExecError("Can't detach re-attach drbd %s on node"
                                 " %s from %s to %s" %
                                 (dev.iv_name, pri_node, old_node, new_node))
      dev.logical_id = (pri_node, new_node, dev.logical_id[2])
      cfg.SetDiskID(dev, pri_node)
      cfg.Update(instance)

      iv_names[dev.iv_name] = (dev, dev.children)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    logger.Info("Done changing drbd configs, waiting for sync")
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    for name, (dev, old_lvs) in iv_names.iteritems():
      logger.Info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        if not rpc.call_blockdev_remove(old_node, lv):
          logger.Error("Can't cleanup child device, skipping. You need to"
                       " fix manually!")
          continue

  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance
    if instance.disk_template == constants.DT_REMOTE_RAID1:
      fn = self._ExecRR1
    elif instance.disk_template == constants.DT_DRBD8:
      # remote_node was normalized in CheckPrereq: None means keep the
      # current secondary (disk-only replacement)
      if self.op.remote_node is None:
        fn = self._ExecD8DiskOnly
      else:
        fn = self._ExecD8Secondary
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return fn(feedback_fn)
3758 a9e0c397 Iustin Pop
3759 a8083063 Iustin Pop
3760 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  Returns configuration and runtime status information (state, nodes,
  nics, per-disk status) for a subset or for all of the cluster's
  instances.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        # bugfix: the append must be inside the loop; it was previously at
        # loop level, so only the last requested instance was ever kept
        self.wanted_instances.append(instance)
    else:
      # no names given: query every instance in the cluster
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Queries the primary node (and the secondary, when one applies) for
    the device's status and recurses into its children.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in);
      # for DRBD the logical_id holds the two nodes of the device pair
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      # the hypervisor reports a running instance via the "state" key
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      result[instance.name] = idict

    return result
3859 a8083063 Iustin Pop
3860 a8083063 Iustin Pop
3861 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
3862 a8083063 Iustin Pop
  """Modifies an instances's parameters.
3863 a8083063 Iustin Pop

3864 a8083063 Iustin Pop
  """
3865 a8083063 Iustin Pop
  HPATH = "instance-modify"
3866 a8083063 Iustin Pop
  HTYPE = constants.HTYPE_INSTANCE
3867 a8083063 Iustin Pop
  _OP_REQP = ["instance_name"]
3868 a8083063 Iustin Pop
3869 a8083063 Iustin Pop
  def BuildHooksEnv(self):
3870 a8083063 Iustin Pop
    """Build hooks env.
3871 a8083063 Iustin Pop

3872 a8083063 Iustin Pop
    This runs on the master, primary and secondaries.
3873 a8083063 Iustin Pop

3874 a8083063 Iustin Pop
    """
3875 396e1b78 Michael Hanselmann
    args = dict()
3876 a8083063 Iustin Pop
    if self.mem:
3877 396e1b78 Michael Hanselmann
      args['memory'] = self.mem
3878 a8083063 Iustin Pop
    if self.vcpus:
3879 396e1b78 Michael Hanselmann
      args['vcpus'] = self.vcpus
3880 396e1b78 Michael Hanselmann
    if self.do_ip or self.do_bridge:
3881 396e1b78 Michael Hanselmann
      if self.do_ip:
3882 396e1b78 Michael Hanselmann
        ip = self.ip
3883 396e1b78 Michael Hanselmann
      else:
3884 396e1b78 Michael Hanselmann
        ip = self.instance.nics[0].ip
3885 396e1b78 Michael Hanselmann
      if self.bridge:
3886 396e1b78 Michael Hanselmann
        bridge = self.bridge
3887 396e1b78 Michael Hanselmann
      else:
3888 396e1b78 Michael Hanselmann
        bridge = self.instance.nics[0].bridge
3889 396e1b78 Michael Hanselmann
      args['nics'] = [(ip, bridge)]
3890 396e1b78 Michael Hanselmann
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
3891 880478f8 Iustin Pop
    nl = [self.sstore.GetMasterNode(),
3892 a8083063 Iustin Pop
          self.instance.primary_node] + list(self.instance.secondary_nodes)
3893 a8083063 Iustin Pop
    return env, nl, nl
3894 a8083063 Iustin Pop
3895 a8083063 Iustin Pop
  def CheckPrereq(self):
3896 a8083063 Iustin Pop
    """Check prerequisites.
3897 a8083063 Iustin Pop

3898 a8083063 Iustin Pop
    This only checks the instance list against the existing names.
3899 a8083063 Iustin Pop

3900 a8083063 Iustin Pop
    """
3901 a8083063 Iustin Pop
    self.mem = getattr(self.op, "mem", None)
3902 a8083063 Iustin Pop
    self.vcpus = getattr(self.op, "vcpus", None)
3903 a8083063 Iustin Pop
    self.ip = getattr(self.op, "ip", None)
3904 a8083063 Iustin Pop
    self.bridge = getattr(self.op, "bridge", None)
3905 a8083063 Iustin Pop
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
3906 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No changes submitted")
3907 a8083063 Iustin Pop
    if self.mem is not None:
3908 a8083063 Iustin Pop
      try:
3909 a8083063 Iustin Pop
        self.mem = int(self.mem)
3910 a8083063 Iustin Pop
      except ValueError, err:
3911 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
3912 a8083063 Iustin Pop
    if self.vcpus is not None:
3913 a8083063 Iustin Pop
      try:
3914 a8083063 Iustin Pop
        self.vcpus = int(self.vcpus)
3915 a8083063 Iustin Pop
      except ValueError, err:
3916 3ecf6786 Iustin Pop
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
3917 a8083063 Iustin Pop
    if self.ip is not None:
3918 a8083063 Iustin Pop
      self.do_ip = True
3919 a8083063 Iustin Pop
      if self.ip.lower() == "none":
3920 a8083063 Iustin Pop
        self.ip = None
3921 a8083063 Iustin Pop
      else:
3922 a8083063 Iustin Pop
        if not utils.IsValidIP(self.ip):
3923 3ecf6786 Iustin Pop
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
3924 a8083063 Iustin Pop
    else:
3925 a8083063 Iustin Pop
      self.do_ip = False
3926 ecb215b5 Michael Hanselmann
    self.do_bridge = (self.bridge is not None)
3927 a8083063 Iustin Pop
3928 a8083063 Iustin Pop
    instance = self.cfg.GetInstanceInfo(
3929 a8083063 Iustin Pop
      self.cfg.ExpandInstanceName(self.op.instance_name))
3930 a8083063 Iustin Pop
    if instance is None:
3931 3ecf6786 Iustin Pop
      raise errors.OpPrereqError("No such instance name '%s'" %
3932 3ecf6786 Iustin Pop
                                 self.op.instance_name)
3933 a8083063 Iustin Pop
    self.op.instance_name = instance.name
3934 a8083063 Iustin Pop
    self.instance = instance
3935 a8083063 Iustin Pop
    return
3936 a8083063 Iustin Pop
3937 a8083063 Iustin Pop
  def Exec(self, feedback_fn):
3938 a8083063 Iustin Pop
    """Modifies an instance.
3939 a8083063 Iustin Pop

3940 a8083063 Iustin Pop
    All parameters take effect only at the next restart of the instance.
3941 a8083063 Iustin Pop
    """
3942 a8083063 Iustin Pop
    result = []
3943 a8083063 Iustin Pop
    instance = self.instance
3944 a8083063 Iustin Pop
    if self.mem:
3945 a8083063 Iustin Pop
      instance.memory = self.mem
3946 a8083063 Iustin Pop
      result.append(("mem", self.mem))
3947 a8083063 Iustin Pop
    if self.vcpus:
3948 a8083063 Iustin Pop
      instance.vcpus = self.vcpus
3949 a8083063 Iustin Pop
      result.append(("vcpus",  self.vcpus))
3950 a8083063 Iustin Pop
    if self.do_ip:
3951 a8083063 Iustin Pop
      instance.nics[0].ip = self.ip
3952 a8083063 Iustin Pop
      result.append(("ip", self.ip))
3953 a8083063 Iustin Pop
    if self.bridge:
3954 a8083063 Iustin Pop
      instance.nics[0].bridge = self.bridge
3955 a8083063 Iustin Pop
      result.append(("bridge", self.bridge))
3956 a8083063 Iustin Pop
3957 a8083063 Iustin Pop
    self.cfg.AddInstance(instance)
3958 a8083063 Iustin Pop
3959 a8083063 Iustin Pop
    return result
3960 a8083063 Iustin Pop
3961 a8083063 Iustin Pop
3962 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    wanted = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, wanted)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
3984 a8083063 Iustin Pop
3985 a8083063 Iustin Pop
3986 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    # expand and look up the instance; CheckPrereq must set self.instance
    # before BuildHooksEnv is called
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # normalize the op field to the canonical (expanded) node name
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    Sequence: optionally shut the instance down, snapshot its "sda"
    disk, restart it (in a finally block, so it comes back even if the
    snapshot failed), then copy the snapshot to the target node, remove
    the snapshot, finalize the export and prune older exports of the
    same instance from other nodes.  Failures after the snapshot stage
    are best-effort: they are logged, not raised.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.processor.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    snap_disks = []

    try:
      # only the first disk ("sda") is exported
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            # wrap the snapshot LV in a Disk object so it can be passed
            # through the rpc export/remove calls below
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance even if the snapshot failed, but only if we
      # were the ones who shut it down
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.processor.ChainOpCode(op)

    # TODO: check for size

    # copy each snapshot to the target node, then drop the snapshot;
    # both steps are best-effort (logged on failure)
    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.processor.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4098 5c947f38 Iustin Pop
4099 5c947f38 Iustin Pop
4100 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.op.kind/self.op.name into self.target, the object
    whose tags will be read or modified.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      # store the canonical name back into the opcode
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4129 5c947f38 Iustin Pop
4130 5c947f38 Iustin Pop
4131 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Return the tags of the target object resolved by TagsLU.CheckPrereq.

    """
    return self.target.GetTags()
4142 5c947f38 Iustin Pop
4143 5c947f38 Iustin Pop
4144 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4145 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4146 73415719 Iustin Pop

4147 73415719 Iustin Pop
  """
4148 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4149 73415719 Iustin Pop
4150 73415719 Iustin Pop
  def CheckPrereq(self):
4151 73415719 Iustin Pop
    """Check prerequisites.
4152 73415719 Iustin Pop

4153 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4154 73415719 Iustin Pop

4155 73415719 Iustin Pop
    """
4156 73415719 Iustin Pop
    try:
4157 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4158 73415719 Iustin Pop
    except re.error, err:
4159 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4160 73415719 Iustin Pop
                                 (self.op.pattern, err))
4161 73415719 Iustin Pop
4162 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4163 73415719 Iustin Pop
    """Returns the tag list.
4164 73415719 Iustin Pop

4165 73415719 Iustin Pop
    """
4166 73415719 Iustin Pop
    cfg = self.cfg
4167 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4168 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4169 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4170 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4171 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4172 73415719 Iustin Pop
    results = []
4173 73415719 Iustin Pop
    for path, target in tgts:
4174 73415719 Iustin Pop
      for tag in target.GetTags():
4175 73415719 Iustin Pop
        if self.re.search(tag):
4176 73415719 Iustin Pop
          results.append((path, tag))
4177 73415719 Iustin Pop
    return results
4178 73415719 Iustin Pop
4179 73415719 Iustin Pop
4180 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4181 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4182 5c947f38 Iustin Pop

4183 5c947f38 Iustin Pop
  """
4184 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4185 5c947f38 Iustin Pop
4186 5c947f38 Iustin Pop
  def CheckPrereq(self):
4187 5c947f38 Iustin Pop
    """Check prerequisites.
4188 5c947f38 Iustin Pop

4189 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4190 5c947f38 Iustin Pop

4191 5c947f38 Iustin Pop
    """
4192 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4193 f27302fa Iustin Pop
    for tag in self.op.tags:
4194 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4195 5c947f38 Iustin Pop
4196 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4197 5c947f38 Iustin Pop
    """Sets the tag.
4198 5c947f38 Iustin Pop

4199 5c947f38 Iustin Pop
    """
4200 5c947f38 Iustin Pop
    try:
4201 f27302fa Iustin Pop
      for tag in self.op.tags:
4202 f27302fa Iustin Pop
        self.target.AddTag(tag)
4203 5c947f38 Iustin Pop
    except errors.TagError, err:
4204 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4205 5c947f38 Iustin Pop
    try:
4206 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4207 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4208 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4209 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4210 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4211 5c947f38 Iustin Pop
4212 5c947f38 Iustin Pop
4213 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we have the given tag.

    """
    TagsLU.CheckPrereq(self)
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)
    # every requested tag must currently exist on the target; compute the
    # missing ones for a precise, sorted error message
    del_tags = frozenset(self.op.tags)
    cur_tags = self.target.GetTags()
    if not del_tags <= cur_tags:
      diff_tags = del_tags - cur_tags
      diff_names = ["'%s'" % tag for tag in diff_tags]
      diff_names.sort()
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(diff_names)))

  def Exec(self, feedback_fn):
    """Remove the tag from the object.

    """
    for tag in self.op.tags:
      self.target.RemoveTag(tag)
    # persist the change; a concurrent config modification aborts the op
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")