Statistics
| Branch: | Tag: | Revision:

root / lib / cmdlib.py @ 65fe4693

History | View | Annotate | Download (138.7 kB)

1 2f31098c Iustin Pop
#
2 a8083063 Iustin Pop
#
3 a8083063 Iustin Pop
4 a8083063 Iustin Pop
# Copyright (C) 2006, 2007 Google Inc.
5 a8083063 Iustin Pop
#
6 a8083063 Iustin Pop
# This program is free software; you can redistribute it and/or modify
7 a8083063 Iustin Pop
# it under the terms of the GNU General Public License as published by
8 a8083063 Iustin Pop
# the Free Software Foundation; either version 2 of the License, or
9 a8083063 Iustin Pop
# (at your option) any later version.
10 a8083063 Iustin Pop
#
11 a8083063 Iustin Pop
# This program is distributed in the hope that it will be useful, but
12 a8083063 Iustin Pop
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 a8083063 Iustin Pop
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 a8083063 Iustin Pop
# General Public License for more details.
15 a8083063 Iustin Pop
#
16 a8083063 Iustin Pop
# You should have received a copy of the GNU General Public License
17 a8083063 Iustin Pop
# along with this program; if not, write to the Free Software
18 a8083063 Iustin Pop
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 a8083063 Iustin Pop
# 02110-1301, USA.
20 a8083063 Iustin Pop
21 a8083063 Iustin Pop
22 880478f8 Iustin Pop
"""Module implementing the master-side code."""
23 a8083063 Iustin Pop
24 a8083063 Iustin Pop
# pylint: disable-msg=W0613,W0201
25 a8083063 Iustin Pop
26 a8083063 Iustin Pop
import os
27 a8083063 Iustin Pop
import os.path
28 a8083063 Iustin Pop
import sha
29 a8083063 Iustin Pop
import time
30 a8083063 Iustin Pop
import tempfile
31 a8083063 Iustin Pop
import re
32 a8083063 Iustin Pop
import platform
33 a8083063 Iustin Pop
34 a8083063 Iustin Pop
from ganeti import rpc
35 a8083063 Iustin Pop
from ganeti import ssh
36 a8083063 Iustin Pop
from ganeti import logger
37 a8083063 Iustin Pop
from ganeti import utils
38 a8083063 Iustin Pop
from ganeti import errors
39 a8083063 Iustin Pop
from ganeti import hypervisor
40 a8083063 Iustin Pop
from ganeti import config
41 a8083063 Iustin Pop
from ganeti import constants
42 a8083063 Iustin Pop
from ganeti import objects
43 a8083063 Iustin Pop
from ganeti import opcodes
44 a8083063 Iustin Pop
from ganeti import ssconf
45 a8083063 Iustin Pop
46 a8083063 Iustin Pop
class LogicalUnit(object):
  """Logical Unit base class.

  Subclasses must follow these rules:
    - implement CheckPrereq which also fills in the opcode instance
      with all the fields (even if as None)
    - implement Exec
    - implement BuildHooksEnv
    - redefine HPATH and HTYPE
    - optionally redefine their run requirements (REQ_CLUSTER,
      REQ_MASTER); note that all commands require root permissions

  """
  # Hooks path for this LU; None means no hooks are run
  HPATH = None
  # Hooks type (one of the constants.HTYPE_* values); unused when HPATH is None
  HTYPE = None
  # Names of opcode attributes that must be present (not None) on self.op
  _OP_REQP = []
  # Whether this LU needs an initialized cluster to run
  REQ_CLUSTER = True
  # Whether this LU must be executed on the master node
  REQ_MASTER = True

  def __init__(self, processor, op, cfg, sstore):
    """Constructor for LogicalUnit.

    This needs to be overriden in derived classes in order to check op
    validity.

    Args:
      processor: the processor driving this LU's execution
      op: the opcode instance this LU operates on
      cfg: cluster configuration accessor (config.ConfigWriter-like)
      sstore: simple-store accessor used to look up the master node

    Raises:
      errors.OpPrereqError: if a required opcode parameter is missing,
        the cluster is not initialized (when REQ_CLUSTER), or this host
        is not the master (when REQ_MASTER)

    """
    self.processor = processor
    self.op = op
    self.cfg = cfg
    self.sstore = sstore
    # Validate declared-required opcode parameters before any other check.
    for attr_name in self._OP_REQP:
      attr_val = getattr(op, attr_name, None)
      if attr_val is None:
        raise errors.OpPrereqError("Required parameter '%s' missing" %
                                   attr_name)
    if self.REQ_CLUSTER:
      if not cfg.IsCluster():
        raise errors.OpPrereqError("Cluster not initialized yet,"
                                   " use 'gnt-cluster init' first.")
      if self.REQ_MASTER:
        # Compare this host's resolved name against the recorded master.
        master = sstore.GetMasterNode()
        if master != utils.HostInfo().name:
          raise errors.OpPrereqError("Commands must be run on the master"
                                     " node %s" % master)

  def CheckPrereq(self):
    """Check prerequisites for this LU.

    This method should check that the prerequisites for the execution
    of this LU are fulfilled. It can do internode communication, but
    it should be idempotent - no cluster or system changes are
    allowed.

    The method should raise errors.OpPrereqError in case something is
    not fulfilled. Its return value is ignored.

    This method should also update all the parameters of the opcode to
    their canonical form; e.g. a short node name must be fully
    expanded after this method has successfully completed (so that
    hooks, logging, etc. work correctly).

    """
    raise NotImplementedError

  def Exec(self, feedback_fn):
    """Execute the LU.

    This method should implement the actual work. It should raise
    errors.OpExecError for failures that are somewhat dealt with in
    code, or expected.

    """
    raise NotImplementedError

  def BuildHooksEnv(self):
    """Build hooks environment for this LU.

    This method should return a three-node tuple consisting of: a dict
    containing the environment that will be used for running the
    specific hook for this LU, a list of node names on which the hook
    should run before the execution, and a list of node names on which
    the hook should run after the execution.

    The keys of the dict must not have 'GANETI_' prefixed as this will
    be handled in the hooks runner. Also note additional keys will be
    added by the hooks runner. If the LU doesn't define any
    environment, an empty dict (and not None) should be returned.

    As for the node lists, the master should not be included in the
    them, as it will be added by the hooks runner in case this LU
    requires a cluster to run on (otherwise we don't have a node
    list). No nodes should be returned as an empty list (and not
    None).

    Note that if the HPATH for a LU class is None, this function will
    not be called.

    """
    raise NotImplementedError
145 a8083063 Iustin Pop
146 a8083063 Iustin Pop
147 a8083063 Iustin Pop
class NoHooksLU(LogicalUnit):
  """Base class for logical units that run no hooks.

  Deriving from this class spares subclasses from providing their own
  hooks environment, thereby reducing duplicated code.

  """
  HPATH = None
  HTYPE = None

  def BuildHooksEnv(self):
    """Return an empty hooks environment.

    Since no hooks are run for these LUs, the environment is an empty
    dict and both the pre- and post-execution node lists are empty.

    """
    return {}, [], []
164 a8083063 Iustin Pop
165 a8083063 Iustin Pop
166 dcb93971 Michael Hanselmann
def _GetWantedNodes(lu, nodes):
  """Return the checked, expanded and sorted list of node names.

  Args:
    nodes: List of nodes (strings) or None for all

  """
  if not isinstance(nodes, list):
    raise errors.OpPrereqError("Invalid argument type 'nodes'")

  if not nodes:
    # Empty (or None-like) request means "all nodes".
    return utils.NiceSort(lu.cfg.GetNodeList())

  wanted = []
  for name in nodes:
    expanded = lu.cfg.ExpandNodeName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such node name '%s'" % name)
    wanted.append(expanded)

  return utils.NiceSort(wanted)
188 3312b702 Iustin Pop
189 3312b702 Iustin Pop
190 3312b702 Iustin Pop
def _GetWantedInstances(lu, instances):
  """Return the checked, expanded and sorted list of instance names.

  Args:
    instances: List of instances (strings) or None for all

  """
  if not isinstance(instances, list):
    raise errors.OpPrereqError("Invalid argument type 'instances'")

  if not instances:
    # Empty (or None-like) request means "all instances".
    return utils.NiceSort(lu.cfg.GetInstanceList())

  wanted = []
  for name in instances:
    expanded = lu.cfg.ExpandInstanceName(name)
    if expanded is None:
      raise errors.OpPrereqError("No such instance name '%s'" % name)
    wanted.append(expanded)

  return utils.NiceSort(wanted)
212 dcb93971 Michael Hanselmann
213 dcb93971 Michael Hanselmann
214 dcb93971 Michael Hanselmann
def _CheckOutputFields(static, dynamic, selected):
215 83120a01 Michael Hanselmann
  """Checks whether all selected fields are valid.
216 83120a01 Michael Hanselmann

217 83120a01 Michael Hanselmann
  Args:
218 83120a01 Michael Hanselmann
    static: Static fields
219 83120a01 Michael Hanselmann
    dynamic: Dynamic fields
220 83120a01 Michael Hanselmann

221 83120a01 Michael Hanselmann
  """
222 83120a01 Michael Hanselmann
  static_fields = frozenset(static)
223 83120a01 Michael Hanselmann
  dynamic_fields = frozenset(dynamic)
224 dcb93971 Michael Hanselmann
225 83120a01 Michael Hanselmann
  all_fields = static_fields | dynamic_fields
226 dcb93971 Michael Hanselmann
227 83120a01 Michael Hanselmann
  if not all_fields.issuperset(selected):
228 3ecf6786 Iustin Pop
    raise errors.OpPrereqError("Unknown output fields selected: %s"
229 3ecf6786 Iustin Pop
                               % ",".join(frozenset(selected).
230 3ecf6786 Iustin Pop
                                          difference(all_fields)))
231 dcb93971 Michael Hanselmann
232 dcb93971 Michael Hanselmann
233 ecb215b5 Michael Hanselmann
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
234 396e1b78 Michael Hanselmann
                          memory, vcpus, nics):
235 ecb215b5 Michael Hanselmann
  """Builds instance related env variables for hooks from single variables.
236 ecb215b5 Michael Hanselmann

237 ecb215b5 Michael Hanselmann
  Args:
238 ecb215b5 Michael Hanselmann
    secondary_nodes: List of secondary nodes as strings
239 396e1b78 Michael Hanselmann
  """
240 396e1b78 Michael Hanselmann
  env = {
241 0e137c28 Iustin Pop
    "OP_TARGET": name,
242 396e1b78 Michael Hanselmann
    "INSTANCE_NAME": name,
243 396e1b78 Michael Hanselmann
    "INSTANCE_PRIMARY": primary_node,
244 396e1b78 Michael Hanselmann
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
245 ecb215b5 Michael Hanselmann
    "INSTANCE_OS_TYPE": os_type,
246 396e1b78 Michael Hanselmann
    "INSTANCE_STATUS": status,
247 396e1b78 Michael Hanselmann
    "INSTANCE_MEMORY": memory,
248 396e1b78 Michael Hanselmann
    "INSTANCE_VCPUS": vcpus,
249 396e1b78 Michael Hanselmann
  }
250 396e1b78 Michael Hanselmann
251 396e1b78 Michael Hanselmann
  if nics:
252 396e1b78 Michael Hanselmann
    nic_count = len(nics)
253 396e1b78 Michael Hanselmann
    for idx, (ip, bridge) in enumerate(nics):
254 396e1b78 Michael Hanselmann
      if ip is None:
255 396e1b78 Michael Hanselmann
        ip = ""
256 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_IP" % idx] = ip
257 396e1b78 Michael Hanselmann
      env["INSTANCE_NIC%d_BRIDGE" % idx] = bridge
258 396e1b78 Michael Hanselmann
  else:
259 396e1b78 Michael Hanselmann
    nic_count = 0
260 396e1b78 Michael Hanselmann
261 396e1b78 Michael Hanselmann
  env["INSTANCE_NIC_COUNT"] = nic_count
262 396e1b78 Michael Hanselmann
263 396e1b78 Michael Hanselmann
  return env
264 396e1b78 Michael Hanselmann
265 396e1b78 Michael Hanselmann
266 396e1b78 Michael Hanselmann
def _BuildInstanceHookEnvByObject(instance, override=None):
  """Builds instance related env variables for hooks from an object.

  Args:
    instance: objects.Instance object of instance
    override: dict of values to override

  Returns:
    dict of environment variables, see _BuildInstanceHookEnv

  """
  args = {
    'name': instance.name,
    'primary_node': instance.primary_node,
    'secondary_nodes': instance.secondary_nodes,
    'os_type': instance.os,
    # Bugfix: this used to read instance.os (a copy-paste of the line
    # above), which exported the OS name as INSTANCE_STATUS instead of
    # the instance's actual run status.
    'status': instance.status,
    'memory': instance.memory,
    'vcpus': instance.vcpus,
    'nics': [(nic.ip, nic.bridge) for nic in instance.nics],
  }
  if override:
    args.update(override)
  return _BuildInstanceHookEnv(**args)
286 396e1b78 Michael Hanselmann
287 396e1b78 Michael Hanselmann
288 a8083063 Iustin Pop
def _UpdateEtcHosts(fullnode, ip):
  """Ensure a node has a correct entry in /etc/hosts.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)

  """
  # Short host name; a complete entry must carry ip, fullnode and node.
  node = fullnode.split(".", 1)[0]

  # Opened r+ so we can append in place when no rewrite is needed.
  f = open('/etc/hosts', 'r+')

  # Whether a complete entry for this host was found.
  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  while True:
    rawline = f.readline()

    if not rawline:
      # End of file
      break

    # Keep rawline (with its newline) for writing back; work on a copy.
    line = rawline.split('\n')[0]

    # Strip off comments
    line = line.split('#')[0]

    if not line:
      # Entire line was comment, skip
      save_lines.append(rawline)
      continue

    fields = line.split()

    # Classify the line: does it mention all of (ip, fullnode, node),
    # only some of them, or none at all?
    haveall = True
    havesome = False
    for spec in [ ip, fullnode, node ]:
      if spec not in fields:
        haveall = False
      if spec in fields:
        havesome = True

    if haveall:
      # Complete entry already present; keep it.
      inthere = True
      save_lines.append(rawline)
      continue

    if havesome and not haveall:
      # Line (old, or manual?) which is missing some.  Remove.
      removed = True
      continue

    # Unrelated line: keep verbatim.
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s\t%s %s\n' % (ip, fullnode, node))

  if removed:
    if add_lines:
      save_lines = save_lines + add_lines

    # We removed a line, write a new file and replace old.
    # The temp file is created in /etc so the rename below stays on the
    # same filesystem and replaces /etc/hosts atomically.
    fd, tmpname = tempfile.mkstemp('tmp', 'hosts_', '/etc')
    newfile = os.fdopen(fd, 'w')
    newfile.write(''.join(save_lines))
    newfile.close()
    os.rename(tmpname, '/etc/hosts')

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
366 a8083063 Iustin Pop
367 a8083063 Iustin Pop
368 a8083063 Iustin Pop
def _UpdateKnownHosts(fullnode, ip, pubkey):
  """Ensure a node has a correct known_hosts entry.

  Args:
    fullnode - Fully qualified domain name of host. (str)
    ip       - IPv4 address of host (str)
    pubkey   - the public key of the cluster

  """
  # Open r+ when the file exists so we can append in place; w+ creates it.
  if os.path.exists(constants.SSH_KNOWN_HOSTS_FILE):
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'r+')
  else:
    f = open(constants.SSH_KNOWN_HOSTS_FILE, 'w+')

  # Whether an exact (hosts + key) match was found.
  inthere = False

  save_lines = []
  add_lines = []
  removed = False

  for rawline in f:
    logger.Debug('read %s' % (repr(rawline),))

    parts = rawline.rstrip('\r\n').split()

    # Ignore unwanted lines
    if len(parts) >= 3 and not rawline.lstrip()[0] == '#':
      # known_hosts format: "host1,host2,... keytype key"; parts[0] is the
      # comma-separated host list and parts[2] the key material.
      fields = parts[0].split(',')
      key = parts[2]

      # Classify the host list: all of (ip, fullnode), some, or none.
      haveall = True
      havesome = False
      for spec in [ ip, fullnode ]:
        if spec not in fields:
          haveall = False
        if spec in fields:
          havesome = True

      logger.Debug("key, pubkey = %s." % (repr((key, pubkey)),))
      if haveall and key == pubkey:
        # Exact match: both hosts present with the correct key.
        inthere = True
        save_lines.append(rawline)
        logger.Debug("Keeping known_hosts '%s'." % (repr(rawline),))
        continue

      if havesome and (not haveall or key != pubkey):
        # Partial or stale entry for this host: drop it.
        removed = True
        logger.Debug("Discarding known_hosts '%s'." % (repr(rawline),))
        continue

    # Unrelated (or comment/short) line: keep verbatim.
    save_lines.append(rawline)

  if not inthere:
    add_lines.append('%s,%s ssh-rsa %s\n' % (fullnode, ip, pubkey))
    logger.Debug("Adding known_hosts '%s'." % (repr(add_lines[-1]),))

  if removed:
    save_lines = save_lines + add_lines

    # Write a new file and replace old.
    # NOTE(review): the temp file lives in constants.DATA_DIR, which is
    # presumably on the same filesystem as SSH_KNOWN_HOSTS_FILE so the
    # rename is atomic — confirm for non-default layouts.
    fd, tmpname = tempfile.mkstemp('.tmp', 'known_hosts.',
                                   constants.DATA_DIR)
    newfile = os.fdopen(fd, 'w')
    try:
      newfile.write(''.join(save_lines))
    finally:
      newfile.close()
    logger.Debug("Wrote new known_hosts.")
    os.rename(tmpname, constants.SSH_KNOWN_HOSTS_FILE)

  elif add_lines:
    # Simply appending a new line will do the trick.
    f.seek(0, 2)
    for add in add_lines:
      f.write(add)

  f.close()
445 a8083063 Iustin Pop
446 a8083063 Iustin Pop
447 a8083063 Iustin Pop
def _HasValidVG(vglist, vgname):
448 a8083063 Iustin Pop
  """Checks if the volume group list is valid.
449 a8083063 Iustin Pop

450 a8083063 Iustin Pop
  A non-None return value means there's an error, and the return value
451 a8083063 Iustin Pop
  is the error message.
452 a8083063 Iustin Pop

453 a8083063 Iustin Pop
  """
454 a8083063 Iustin Pop
  vgsize = vglist.get(vgname, None)
455 a8083063 Iustin Pop
  if vgsize is None:
456 a8083063 Iustin Pop
    return "volume group '%s' missing" % vgname
457 a8083063 Iustin Pop
  elif vgsize < 20480:
458 191a8385 Guido Trotter
    return ("volume group '%s' too small (20480MiB required, %dMib found)" %
459 191a8385 Guido Trotter
            (vgname, vgsize))
460 a8083063 Iustin Pop
  return None
461 a8083063 Iustin Pop
462 a8083063 Iustin Pop
463 a8083063 Iustin Pop
def _InitSSHSetup(node):
  """Set up the SSH configuration for the cluster.


  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Back up any pre-existing key files, then remove them so ssh-keygen
  # can create a fresh pair without prompting.
  for key_file in (priv_key, pub_key):
    if os.path.exists(key_file):
      utils.CreateBackup(key_file)
    utils.RemoveFile(key_file)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  # Append the freshly generated public key to the authorized keys file.
  pub = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, pub.read(8192))
  finally:
    pub.close()
493 a8083063 Iustin Pop
494 a8083063 Iustin Pop
495 a8083063 Iustin Pop
def _InitGanetiServerSetup(ss):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  Args:
    ss: the simple store instance in which the node password is saved

  Raises:
    errors.OpExecError: if certificate generation or the node daemon
      restart fails

  """
  # Create pseudo random password
  # NOTE(review): uses the legacy 'sha' module (deprecated in favour of
  # hashlib); the 64 urandom bytes provide the actual entropy here.
  randpass = sha.new(os.urandom(64)).hexdigest()
  # and write it into sstore
  ss.SetKey(ss.SS_NODED_PASS, randpass)

  # Generate a self-signed certificate valid for five years, with key
  # and certificate written into the same file.
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  # The file contains the private key, so make it owner-read-only.
  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
524 a8083063 Iustin Pop
525 a8083063 Iustin Pop
526 bf6929a2 Alexander Schreiber
def _CheckInstanceBridgesExist(instance):
  """Check that the brigdes needed by an instance exist.

  Raises:
    errors.OpPrereqError: if any bridge used by the instance's NICs is
      missing on its primary node

  """
  # check bridges existance
  brlist = [nic.bridge for nic in instance.nics]
  bridges_ok = rpc.call_bridges_exist(instance.primary_node, brlist)
  if not bridges_ok:
    raise errors.OpPrereqError("one or more target bridges %s does not"
                               " exist on destination node '%s'" %
                               (brlist, instance.primary_node))
536 bf6929a2 Alexander Schreiber
537 bf6929a2 Alexander Schreiber
538 a8083063 Iustin Pop
class LUInitCluster(LogicalUnit):
  """Initialise the cluster.

  Validates the host/cluster names and the requested parameters, then
  sets up the simple store, server certificates, master IP, ssh keys,
  /etc/hosts entries and the initial cluster configuration.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER
  # attributes that must be present on the opcode for this LU to run
  _OP_REQP = ["cluster_name", "hypervisor_type", "vg_name", "mac_prefix",
              "def_bridge", "master_netdev"]
  # special case: this LU runs before any cluster exists
  REQ_CLUSTER = False

  def BuildHooksEnv(self):
    """Build hooks env.

    Notes: Since we don't require a cluster, we must manually add
    ourselves in the post-run node list.

    """
    env = {"OP_TARGET": self.op.cluster_name}
    # no pre-run nodes; post-run only on the local host
    return env, [], [self.hostname.name]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    Checks that no cluster is initialised yet, that the local host and
    cluster names resolve sanely, and validates the secondary IP, volume
    group, MAC prefix, hypervisor type and master network device.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    if config.ConfigWriter.IsCluster():
      raise errors.OpPrereqError("Cluster is already initialised")

    self.hostname = hostname = utils.HostInfo()

    # a loopback-range resolution usually means a bogus /etc/hosts entry
    if hostname.ip.startswith("127."):
      raise errors.OpPrereqError("This host's IP resolves to the private"
                                 " range (%s). Please fix DNS or /etc/hosts." %
                                 (hostname.ip,))

    self.clustername = clustername = utils.HostInfo(self.op.cluster_name)

    # the resolved IP must be one we can actually reach locally
    if not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, hostname.ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                                 " to %s,\nbut this ip address does not"
                                 " belong to this host."
                                 " Aborting." % hostname.ip)

    # secondary_ip is optional on the opcode, hence getattr
    secondary_ip = getattr(self.op, "secondary_ip", None)
    if secondary_ip and not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    # if given and different from the primary, it must also be local
    if (secondary_ip and
        secondary_ip != hostname.ip and
        (not utils.TcpPing(constants.LOCALHOST_IP_ADDRESS, secondary_ip,
                           constants.DEFAULT_NODED_PORT))):
      raise errors.OpPrereqError("You gave %s as secondary IP,\n"
                                 "but it does not belong to this host." %
                                 secondary_ip)
    self.secondary_ip = secondary_ip

    # checks presence of the volume group given
    vgstatus = _HasValidVG(utils.ListVolumeGroups(), self.op.vg_name)

    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus)

    # MAC prefix must look like "aa:00:00" (three hex octets)
    if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$",
                    self.op.mac_prefix):
      raise errors.OpPrereqError("Invalid mac prefix given '%s'" %
                                 self.op.mac_prefix)

    if self.op.hypervisor_type not in hypervisor.VALID_HTYPES:
      raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                                 self.op.hypervisor_type)

    # verify the master netdev exists by querying the kernel
    result = utils.RunCmd(["ip", "link", "show", "dev", self.op.master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (self.op.master_netdev,
                                  result.output.strip()))

  def Exec(self, feedback_fn):
    """Initialize the cluster.

    Performs the actual initialisation: populates the simple store,
    generates server credentials, starts the master IP, installs ssh
    keys and host entries, and writes the initial configuration.

    """
    clustername = self.clustername
    hostname = self.hostname

    # set up the simple store
    self.sstore = ss = ssconf.SimpleStore()
    ss.SetKey(ss.SS_HYPERVISOR, self.op.hypervisor_type)
    ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
    ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
    ss.SetKey(ss.SS_MASTER_NETDEV, self.op.master_netdev)
    ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)

    # set up the inter-node password and certificate
    _InitGanetiServerSetup(ss)

    # start the master ip
    rpc.call_node_start_master(hostname.name)

    # set up ssh config and /etc/hosts
    f = open(constants.SSH_HOST_RSA_PUB, 'r')
    try:
      sshline = f.read()
    finally:
      f.close()
    # key file format is "<type> <key> <comment>"; keep only the key part
    sshkey = sshline.split(" ")[1]

    _UpdateEtcHosts(hostname.name, hostname.ip)

    _UpdateKnownHosts(hostname.name, hostname.ip, sshkey)

    _InitSSHSetup(hostname.name)

    # init of cluster config file
    self.cfg = cfgw = config.ConfigWriter()
    cfgw.InitConfig(hostname.name, hostname.ip, self.secondary_ip,
                    sshkey, self.op.mac_prefix,
                    self.op.vg_name, self.op.def_bridge)
655 a8083063 Iustin Pop
656 a8083063 Iustin Pop
class LUDestroyCluster(NoHooksLU):
  """Logical unit for destroying the cluster.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    master_node = self.sstore.GetMasterNode()

    # the node list must consist of exactly the master node
    nodes = self.cfg.GetNodeList()
    if nodes != [master_node]:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodes) - 1))
    # and no instances may remain
    instances = self.cfg.GetInstanceList()
    if instances:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instances))

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    # save copies of the ganeti user's ssh keypair before leaving
    rsa_priv, rsa_pub, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    for key_path in (rsa_priv, rsa_pub):
      utils.CreateBackup(key_path)
    rpc.call_node_leave_cluster(self.sstore.GetMasterNode())
689 a8083063 Iustin Pop
690 a8083063 Iustin Pop
691 a8083063 Iustin Pop
class LUVerifyCluster(NoHooksLU):
  """Verifies the cluster status.

  Runs a series of checks against every node and instance and reports
  problems through the feedback function; Exec returns 1 if any problem
  was found, 0 otherwise.

  """
  _OP_REQP = []

  def _VerifyNode(self, node, file_list, local_cksum, vglist, node_result,
                  remote_version, feedback_fn):
    """Run multiple tests against a node.

    Test list:
      - compares ganeti version
      - checks vg existance and size > 20G
      - checks config file checksum
      - checks ssh to other nodes

    Args:
      node: name of the node to check
      file_list: required list of files
      local_cksum: dictionary of local files and their checksums

    Returns True if any problem was detected on the node, False if the
    node passed all checks.

    """
    # compares ganeti version
    local_version = constants.PROTOCOL_VERSION
    if not remote_version:
      # no version data at all means the node could not be contacted
      feedback_fn(" - ERROR: connection to %s failed" % (node))
      return True

    if local_version != remote_version:
      feedback_fn("  - ERROR: sw version mismatch: master %s, node(%s) %s" %
                      (local_version, node, remote_version))
      return True

    # checks vg existance and size > 20G

    bad = False
    if not vglist:
      feedback_fn("  - ERROR: unable to check volume groups on node %s." %
                      (node,))
      bad = True
    else:
      vgstatus = _HasValidVG(vglist, self.cfg.GetVGName())
      if vgstatus:
        feedback_fn("  - ERROR: %s on node %s" % (vgstatus, node))
        bad = True

    # checks config file checksum
    # checks ssh to any

    if 'filelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned file checksum data")
    else:
      remote_cksum = node_result['filelist']
      for file_name in file_list:
        if file_name not in remote_cksum:
          bad = True
          feedback_fn("  - ERROR: file '%s' missing" % file_name)
        elif remote_cksum[file_name] != local_cksum[file_name]:
          bad = True
          feedback_fn("  - ERROR: file '%s' has wrong checksum" % file_name)

    if 'nodelist' not in node_result:
      bad = True
      feedback_fn("  - ERROR: node hasn't returned node connectivity data")
    else:
      if node_result['nodelist']:
        bad = True
        # NOTE(review): this loop variable shadows the 'node' parameter;
        # harmless here since the parameter is not used afterwards, but
        # worth renaming at some point
        for node in node_result['nodelist']:
          feedback_fn("  - ERROR: communication with node '%s': %s" %
                          (node, node_result['nodelist'][node]))
    hyp_result = node_result.get('hypervisor', None)
    if hyp_result is not None:
      feedback_fn("  - ERROR: hypervisor verify failure: '%s'" % hyp_result)
    return bad

  def _VerifyInstance(self, instance, node_vol_is, node_instance, feedback_fn):
    """Verify an instance.

    This function checks to see if the required block devices are
    available on the instance's node.

    Returns True if any problem was found for this instance.

    """
    bad = False

    instancelist = self.cfg.GetInstanceList()
    if not instance in instancelist:
      feedback_fn("  - ERROR: instance %s not in instance list %s" %
                      (instance, instancelist))
      bad = True

    instanceconfig = self.cfg.GetInstanceInfo(instance)
    node_current = instanceconfig.primary_node

    # map of node -> LVs the instance should have there
    node_vol_should = {}
    instanceconfig.MapLVsByNode(node_vol_should)

    for node in node_vol_should:
      for volume in node_vol_should[node]:
        if node not in node_vol_is or volume not in node_vol_is[node]:
          feedback_fn("  - ERROR: volume %s missing on node %s" %
                          (volume, node))
          bad = True

    # an instance not marked 'down' must be running on its primary node
    if not instanceconfig.status == 'down':
      if not instance in node_instance[node_current]:
        feedback_fn("  - ERROR: instance %s not running on node %s" %
                        (instance, node_current))
        bad = True

    # the instance must not be running anywhere else
    for node in node_instance:
      if (not node == node_current):
        if instance in node_instance[node]:
          feedback_fn("  - ERROR: instance %s should not run on node %s" %
                          (instance, node))
          bad = True

    return bad

  def _VerifyOrphanVolumes(self, node_vol_should, node_vol_is, feedback_fn):
    """Verify if there are any unknown volumes in the cluster.

    The .os, .swap and backup volumes are ignored. All other volumes are
    reported as unknown.

    Returns True if any orphan volume was found.

    """
    bad = False

    for node in node_vol_is:
      for volume in node_vol_is[node]:
        if node not in node_vol_should or volume not in node_vol_should[node]:
          feedback_fn("  - ERROR: volume %s on node %s should not exist" %
                      (volume, node))
          bad = True
    return bad

  def _VerifyOrphanInstances(self, instancelist, node_instance, feedback_fn):
    """Verify the list of running instances.

    This checks what instances are running but unknown to the cluster.

    Returns True if any unknown running instance was found.

    """
    bad = False
    for node in node_instance:
      for runninginstance in node_instance[node]:
        if runninginstance not in instancelist:
          feedback_fn("  - ERROR: instance %s on node %s should not exist" %
                          (runninginstance, node))
          bad = True
    return bad

  def CheckPrereq(self):
    """Check prerequisites.

    This has no prerequisites.

    """
    pass

  def Exec(self, feedback_fn):
    """Verify integrity of cluster, performing various test on nodes.

    Gathers volume, instance, vg and version data from all nodes via
    RPC, then runs the per-node, per-instance and orphan checks.
    Returns 1 if any problem was found, 0 otherwise.

    """
    bad = False
    feedback_fn("* Verifying global settings")
    self.cfg.VerifyConfig()

    vg_name = self.cfg.GetVGName()
    nodelist = utils.NiceSort(self.cfg.GetNodeList())
    instancelist = utils.NiceSort(self.cfg.GetInstanceList())
    node_volume = {}
    node_instance = {}

    # FIXME: verify OS list
    # do local checksums
    file_names = list(self.sstore.GetFileList())
    file_names.append(constants.SSL_CERT_FILE)
    file_names.append(constants.CLUSTER_CONF_FILE)
    local_checksums = utils.FingerprintFiles(file_names)

    feedback_fn("* Gathering data (%d nodes)" % len(nodelist))
    all_volumeinfo = rpc.call_volume_list(nodelist, vg_name)
    all_instanceinfo = rpc.call_instance_list(nodelist)
    all_vglist = rpc.call_vg_list(nodelist)
    node_verify_param = {
      'filelist': file_names,
      'nodelist': nodelist,
      'hypervisor': None,
      }
    all_nvinfo = rpc.call_node_verify(nodelist, node_verify_param)
    all_rversion = rpc.call_version(nodelist)

    for node in nodelist:
      feedback_fn("* Verifying node %s" % node)
      result = self._VerifyNode(node, file_names, local_checksums,
                                all_vglist[node], all_nvinfo[node],
                                all_rversion[node], feedback_fn)
      bad = bad or result

      # node_volume
      volumeinfo = all_volumeinfo[node]

      # a non-dict answer signals an RPC failure for this node
      if type(volumeinfo) != dict:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_volume[node] = volumeinfo

      # node_instance
      nodeinstance = all_instanceinfo[node]
      if type(nodeinstance) != list:
        feedback_fn("  - ERROR: connection to %s failed" % (node,))
        bad = True
        continue

      node_instance[node] = nodeinstance

    # accumulated node -> expected-LVs map, filled per instance below
    node_vol_should = {}

    for instance in instancelist:
      feedback_fn("* Verifying instance %s" % instance)
      result =  self._VerifyInstance(instance, node_volume, node_instance,
                                     feedback_fn)
      bad = bad or result

      inst_config = self.cfg.GetInstanceInfo(instance)

      inst_config.MapLVsByNode(node_vol_should)

    feedback_fn("* Verifying orphan volumes")
    result = self._VerifyOrphanVolumes(node_vol_should, node_volume,
                                       feedback_fn)
    bad = bad or result

    feedback_fn("* Verifying remaining instances")
    result = self._VerifyOrphanInstances(instancelist, node_instance,
                                         feedback_fn)
    bad = bad or result

    return int(bad)
932 a8083063 Iustin Pop
933 a8083063 Iustin Pop
934 07bd8a51 Iustin Pop
class LURenameCluster(LogicalUnit):
  """Rename the cluster.

  Updates the cluster name and master IP in the simple store and
  distributes the changed files to all nodes.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER
  _OP_REQP = ["name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    Exposes the current cluster name (OP_TARGET) and the requested new
    name (NEW_NAME); hooks run only on the master node.

    """
    # bugfix: the simple store lives on the LU itself (self.sstore), not
    # on the opcode -- self.op only carries the _OP_REQP attributes
    # ("name"), so self.op.sstore would raise AttributeError
    env = {
      "OP_TARGET": self.sstore.GetClusterName(),
      "NEW_NAME": self.op.name,
      }
    mn = self.sstore.GetMasterNode()
    return env, [mn], [mn]

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    The new name must resolve, must differ from the current name or IP,
    and a changed IP must not already be in use on the network.

    Any errors are signalled by raising errors.OpPrereqError.

    """
    hostname = utils.HostInfo(self.op.name)

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.sstore.GetClusterName()
    old_ip = self.sstore.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed")
    if new_ip != old_ip:
      # fping exits successfully only if the address answered, meaning
      # the prospective master IP is already taken
      result = utils.RunCmd(["fping", "-q", new_ip])
      if not result.failed:
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network. Aborting." %
                                   new_ip)

    # store the fully-resolved name back on the opcode for Exec
    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    Stops the master IP, rewrites the simple store keys, pushes the
    changed files to all other nodes and restarts the master IP.

    """
    clustername = self.op.name
    ip = self.ip
    ss = self.sstore

    # shutdown the master IP
    master = ss.GetMasterNode()
    if not rpc.call_node_stop_master(master):
      raise errors.OpExecError("Could not disable the master role")

    try:
      # modify the sstore
      ss.SetKey(ss.SS_MASTER_IP, ip)
      ss.SetKey(ss.SS_CLUSTER_NAME, clustername)

      # Distribute updated ss config to all nodes
      myself = self.cfg.GetNodeInfo(master)
      dist_nodes = self.cfg.GetNodeList()
      if myself.name in dist_nodes:
        dist_nodes.remove(myself.name)

      logger.Debug("Copying updated ssconf data to all nodes")
      for keyname in [ss.SS_CLUSTER_NAME, ss.SS_MASTER_IP]:
        fname = ss.KeyToFilename(keyname)
        result = rpc.call_upload_file(dist_nodes, fname)
        for to_node in dist_nodes:
          # failed copies are logged but do not abort the rename
          if not result[to_node]:
            logger.Error("copy of file %s to node %s failed" %
                         (fname, to_node))
    finally:
      # always try to restore the master IP, whatever happened above
      if not rpc.call_node_start_master(master):
        logger.Error("Could not re-enable the master role on the master,\n"
                     "please restart manually.")
1011 07bd8a51 Iustin Pop
1012 07bd8a51 Iustin Pop
1013 a8083063 Iustin Pop
def _WaitForSync(cfgw, instance, oneshot=False, unlock=False):
  """Sleep and poll for an instance's disk to sync.

  Args:
    cfgw: the configuration writer, used to set disk IDs
    instance: the instance whose disks are polled on its primary node
    oneshot: if True, poll and report once instead of looping until done
    unlock: if True, release the 'cmd' lock while sleeping between polls

  Returns True if no disk ended up degraded, False otherwise.

  """
  if not instance.disks:
    # nothing to wait for
    return True

  if not oneshot:
    logger.ToStdout("Waiting for instance %s to sync disks." % instance.name)

  node = instance.primary_node

  for dev in instance.disks:
    cfgw.SetDiskID(dev, node)

  retries = 0
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = rpc.call_blockdev_getmirrorstatus(node, instance.disks)
    if not rstats:
      # RPC failure: retry up to 10 times, 6 seconds apart, then give up
      logger.ToStderr("Can't get any data from node %s" % node)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    # a successful answer resets the retry counter
    retries = 0
    for i in range(len(rstats)):
      mstat = rstats[i]
      if mstat is None:
        logger.ToStderr("Can't compute data for node %s/%s" %
                        (node, instance.disks[i].iv_name))
        continue
      # per-device status: a perc_done of None means the device is in sync
      perc_done, est_time, is_degraded = mstat
      cumul_degraded = cumul_degraded or (is_degraded and perc_done is None)
      if perc_done is not None:
        done = False
        if est_time is not None:
          rem_time = "%d estimated seconds remaining" % est_time
          # remember the largest estimate to size the sleep below
          max_time = est_time
        else:
          # NOTE(review): max_time stays 0 here, so the sleep below is
          # zero seconds -- looks like a tight polling loop when no
          # estimate is reported; confirm whether this is intended
          rem_time = "no time estimate"
        logger.ToStdout("- device %s: %5.2f%% done, %s" %
                        (instance.disks[i].iv_name, perc_done, rem_time))
    if done or oneshot:
      break

    # drop the command lock while sleeping so other commands can run
    if unlock:
      utils.Unlock('cmd')
    try:
      time.sleep(min(60, max_time))
    finally:
      if unlock:
        utils.Lock('cmd')

  if done:
    logger.ToStdout("Instance %s's disks are in sync." % instance.name)
  return not cumul_degraded
1074 a8083063 Iustin Pop
1075 a8083063 Iustin Pop
1076 a8083063 Iustin Pop
def _CheckDiskConsistency(cfgw, dev, node, on_primary):
  """Check that mirrors are not degraded.

  Recursively inspects *dev* and all of its children on *node*; the
  result is False if any device reports a degraded state or cannot be
  queried at all.

  """
  cfgw.SetDiskID(dev, node)

  healthy = True
  if on_primary or dev.AssembleOnSecondary():
    dev_status = rpc.call_blockdev_find(node, dev)
    if dev_status:
      # element 5 of the status appears to be the degraded flag --
      # see rpc.call_blockdev_find for the exact tuple layout
      healthy = healthy and (not dev_status[5])
    else:
      logger.ToStderr("Can't get any data from node %s" % node)
      healthy = False
  for child in (dev.children or []):
    healthy = healthy and _CheckDiskConsistency(cfgw, child, node, on_primary)

  return healthy
1095 a8083063 Iustin Pop
1096 a8083063 Iustin Pop
1097 a8083063 Iustin Pop
class LUDiagnoseOS(NoHooksLU):
  """Logical unit for OS diagnose/query.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check prerequisites.

    This always succeeds, since this is a pure query LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Compute the list of OSes.

    """
    nodes = self.cfg.GetNodeList()
    diagnose_data = rpc.call_os_diagnose(nodes)
    # keep the explicit ``== False`` comparison: the RPC layer signals
    # total failure with False, while an empty result is a valid answer
    if diagnose_data == False:
      raise errors.OpExecError("Can't gather the list of OSes")
    return diagnose_data
1120 a8083063 Iustin Pop
1121 a8083063 Iustin Pop
1122 a8083063 Iustin Pop
class LURemoveNode(LogicalUnit):
  """Logical unit for removing a node.

  """
  HPATH = "node-remove"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This doesn't run on the target node in the pre phase as a failed
    node would not allow itself to run.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      }
    all_nodes = self.cfg.GetNodeList()
    # the node being removed runs the hooks in neither phase
    all_nodes.remove(self.op.node_name)
    return env, all_nodes, all_nodes

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - it does not have primary or secondary instances
     - it's not the master

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.node_name))
    if node is None:
      # note: call-form raise, consistent with the rest of this module
      # (the old "raise cls, args" form is deprecated)
      raise errors.OpPrereqError("Node '%s' is unknown." % self.op.node_name)

    instance_list = self.cfg.GetInstanceList()

    masternode = self.sstore.GetMasterNode()
    if node.name == masternode:
      raise errors.OpPrereqError("Node is the master node,"
                                 " you need to failover first.")

    # refuse removal while any instance still uses the node, either as
    # its primary or as one of its secondaries
    for instance_name in instance_list:
      instance = self.cfg.GetInstanceInfo(instance_name)
      if node.name == instance.primary_node:
        raise errors.OpPrereqError("Instance %s still running on the node,"
                                   " please remove first." % instance_name)
      if node.name in instance.secondary_nodes:
        raise errors.OpPrereqError("Instance %s has node as a secondary,"
                                   " please remove first." % instance_name)
    # canonicalize the (possibly abbreviated) user-supplied name
    self.op.node_name = node.name
    self.node = node

  def Exec(self, feedback_fn):
    """Removes the node from the cluster.

    Tells the node to leave the cluster, stops its node daemon and
    finally removes it from the configuration.

    """
    node = self.node
    logger.Info("stopping the node daemon and removing configs from node %s" %
                node.name)

    rpc.call_node_leave_cluster(node.name)

    ssh.SSHCall(node.name, 'root', "%s stop" % constants.NODE_INITD_SCRIPT)

    logger.Info("Removing node %s from config" % node.name)

    self.cfg.RemoveNode(node.name)
1193 a8083063 Iustin Pop
1194 a8083063 Iustin Pop
1195 a8083063 Iustin Pop
class LUQueryNodes(NoHooksLU):
  """Logical unit for querying nodes.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    Validates that every requested output field is either a known
    static field or one of the dynamic (live) fields, and expands the
    wanted node name list.

    """
    self.dynamic_fields = frozenset(["dtotal", "dfree",
                                     "mtotal", "mnode", "mfree",
                                     "bootid"])

    _CheckOutputFields(static=["name", "pinst_cnt", "sinst_cnt",
                               "pinst_list", "sinst_list",
                               "pip", "sip"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    self.wanted = _GetWantedNodes(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    names = self.wanted
    nodes = [self.cfg.GetNodeInfo(name) for name in names]

    # live data involves a per-node RPC, so gather it only when at
    # least one dynamic field was actually requested
    live_data = {}
    if self.dynamic_fields.intersection(self.op.output_fields):
      node_data = rpc.call_node_info(names, self.cfg.GetVGName())
      for name in names:
        nodeinfo = node_data.get(name, None)
        if not nodeinfo:
          live_data[name] = {}
          continue
        live_data[name] = {
          "mtotal": utils.TryConvert(int, nodeinfo['memory_total']),
          "mnode": utils.TryConvert(int, nodeinfo['memory_dom0']),
          "mfree": utils.TryConvert(int, nodeinfo['memory_free']),
          "dtotal": utils.TryConvert(int, nodeinfo['vg_size']),
          "dfree": utils.TryConvert(int, nodeinfo['vg_free']),
          "bootid": nodeinfo['bootid'],
          }
    else:
      for name in names:
        live_data[name] = {}

    # per-node instance membership, filled in only when an
    # instance-related field was requested
    node_to_primary = {}
    node_to_secondary = {}
    for name in names:
      node_to_primary[name] = set()
      node_to_secondary[name] = set()

    inst_fields = frozenset(("pinst_cnt", "pinst_list",
                             "sinst_cnt", "sinst_list"))
    if inst_fields & frozenset(self.op.output_fields):
      for instance_name in self.cfg.GetInstanceList():
        inst = self.cfg.GetInstanceInfo(instance_name)
        if inst.primary_node in node_to_primary:
          node_to_primary[inst.primary_node].add(inst.name)
        for secnode in inst.secondary_nodes:
          if secnode in node_to_secondary:
            node_to_secondary[secnode].add(inst.name)

    # build one output row per node, in the field order requested
    output = []
    for node in nodes:
      row = []
      for field in self.op.output_fields:
        if field in self.dynamic_fields:
          val = live_data[node.name].get(field, None)
        elif field == "name":
          val = node.name
        elif field == "pip":
          val = node.primary_ip
        elif field == "sip":
          val = node.secondary_ip
        elif field == "pinst_list":
          val = list(node_to_primary[node.name])
        elif field == "sinst_list":
          val = list(node_to_secondary[node.name])
        elif field == "pinst_cnt":
          val = len(node_to_primary[node.name])
        elif field == "sinst_cnt":
          val = len(node_to_secondary[node.name])
        else:
          raise errors.ParameterError(field)
        row.append(val)
      output.append(row)

    return output
1291 a8083063 Iustin Pop
1292 a8083063 Iustin Pop
1293 dcb93971 Michael Hanselmann
class LUQueryNodeVolumes(NoHooksLU):
  """Logical unit for getting volumes on node(s).

  """
  _OP_REQP = ["nodes", "output_fields"]

  def CheckPrereq(self):
    """Check prerequisites.

    Expands the wanted node list and validates the output fields.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

    _CheckOutputFields(static=["node"],
                       dynamic=["phys", "vg", "name", "size", "instance"],
                       selected=self.op.output_fields)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    """
    names = self.nodes
    volumes = rpc.call_node_volumes(names)

    instances = [self.cfg.GetInstanceInfo(iname)
                 for iname in self.cfg.GetInstanceList()]
    lv_by_node = {}
    for inst in instances:
      lv_by_node[inst] = inst.MapLVsByNode()

    def _LookupOwner(node, lv_name):
      """Return the instance owning lv_name on node, or '-' if none."""
      for inst in instances:
        node_lvs = lv_by_node[inst]
        if node in node_lvs and lv_name in node_lvs[node]:
          return inst.name
      return '-'

    output = []
    for node in names:
      # skip nodes which returned no (or empty) volume data
      if node not in volumes or not volumes[node]:
        continue

      # sorted() leaves the RPC result untouched, matching the
      # copy-then-sort of the previous implementation
      for vol in sorted(volumes[node], key=lambda item: item['dev']):
        row = []
        for field in self.op.output_fields:
          if field == "node":
            val = node
          elif field == "phys":
            val = vol['dev']
          elif field == "vg":
            val = vol['vg']
          elif field == "name":
            val = vol['name']
          elif field == "size":
            val = int(float(vol['size']))
          elif field == "instance":
            val = _LookupOwner(node, vol['name'])
          else:
            raise errors.ParameterError(field)
          row.append(str(val))

        output.append(row)

    return output
1361 dcb93971 Michael Hanselmann
1362 dcb93971 Michael Hanselmann
1363 a8083063 Iustin Pop
class LUAddNode(LogicalUnit):
  """Logical unit for adding node to the cluster.

  """
  HPATH = "node-add"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on all nodes before, and on all nodes + the new node after.

    """
    env = {
      "OP_TARGET": self.op.node_name,
      "NODE_NAME": self.op.node_name,
      "NODE_PIP": self.op.primary_ip,
      "NODE_SIP": self.op.secondary_ip,
      }
    nodes_0 = self.cfg.GetNodeList()
    # post-phase hooks additionally run on the freshly added node
    nodes_1 = nodes_0 + [self.op.node_name, ]
    return env, nodes_0, nodes_1

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the new node is not already in the config
     - it is resolvable
     - its parameters (single/dual homed) matches the cluster

    Any errors are signalled by raising errors.OpPrereqError.

    """
    node_name = self.op.node_name
    cfg = self.cfg

    dns_data = utils.HostInfo(node_name)

    node = dns_data.name
    primary_ip = self.op.primary_ip = dns_data.ip
    secondary_ip = getattr(self.op, "secondary_ip", None)
    # a missing secondary ip means a single-homed node: fall back to
    # the primary ip
    if secondary_ip is None:
      secondary_ip = primary_ip
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary IP given")
    self.op.secondary_ip = secondary_ip
    node_list = cfg.GetNodeList()
    if node in node_list:
      raise errors.OpPrereqError("Node %s is already in the configuration"
                                 % node)

    # neither of the new node's addresses may collide with any address
    # of an existing node
    for existing_node_name in node_list:
      existing_node = cfg.GetNodeInfo(existing_node_name)
      if (existing_node.primary_ip == primary_ip or
          existing_node.secondary_ip == primary_ip or
          existing_node.primary_ip == secondary_ip or
          existing_node.secondary_ip == secondary_ip):
        raise errors.OpPrereqError("New node ip address(es) conflict with"
                                   " existing node %s" % existing_node.name)

    # check that the type of the node (single versus dual homed) is the
    # same as for the master
    myself = cfg.GetNodeInfo(self.sstore.GetMasterNode())
    master_singlehomed = myself.secondary_ip == myself.primary_ip
    newbie_singlehomed = secondary_ip == primary_ip
    if master_singlehomed != newbie_singlehomed:
      if master_singlehomed:
        raise errors.OpPrereqError("The master has no private ip but the"
                                   " new node has one")
      else:
        raise errors.OpPrereqError("The master has a private ip but the"
                                   " new node doesn't have one")

    # checks reachablity
    if not utils.TcpPing(utils.HostInfo().name,
                         primary_ip,
                         constants.DEFAULT_NODED_PORT):
      raise errors.OpPrereqError("Node not reachable by ping")

    if not newbie_singlehomed:
      # check reachability from my secondary ip to newbie's secondary ip
      if not utils.TcpPing(myself.secondary_ip,
                           secondary_ip,
                           constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError(
          "Node secondary ip not reachable by TCP based ping to noded port")

    self.new_node = objects.Node(name=node,
                                 primary_ip=primary_ip,
                                 secondary_ip=secondary_ip)

  def Exec(self, feedback_fn):
    """Adds the new node to the cluster.

    Provisions the node daemon credentials over ssh, verifies the
    daemon answers with a matching protocol version, distributes ssh
    keys and cluster files, and finally registers the node in the
    configuration.

    """
    new_node = self.new_node
    node = new_node.name

    # set up inter-node password and certificate and restarts the node daemon
    gntpass = self.sstore.GetNodeDaemonPassword()
    # the password is embedded in a shell command below, so restrict it
    # to characters that are safe inside single quotes
    if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
      raise errors.OpExecError("ganeti password corruption detected")
    f = open(constants.SSL_CERT_FILE)
    try:
      gntpem = f.read(8192)
    finally:
      f.close()
    # in the base64 pem encoding, neither '!' nor '.' are valid chars,
    # so we use this to detect an invalid certificate; as long as the
    # cert doesn't contain this, the here-document will be correctly
    # parsed by the shell sequence below
    if re.search('^!EOF\.', gntpem, re.MULTILINE):
      raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
    if not gntpem.endswith("\n"):
      raise errors.OpExecError("PEM must end with newline")
    logger.Info("copy cluster pass to %s and starting the node daemon" % node)

    # and then connect with ssh to set password and start ganeti-noded
    # note that all the below variables are sanitized at this point,
    # either by being constants or by the checks above
    ss = self.sstore
    mycommand = ("umask 077 && "
                 "echo '%s' > '%s' && "
                 "cat > '%s' << '!EOF.' && \n"
                 "%s!EOF.\n%s restart" %
                 (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                  constants.SSL_CERT_FILE, gntpem,
                  constants.NODE_INITD_SCRIPT))

    result = ssh.SSHCall(node, 'root', mycommand, batch=False, ask_key=True)
    if result.failed:
      raise errors.OpExecError("Remote command on node %s, error: %s,"
                               " output: %s" %
                               (node, result.fail_reason, result.output))

    # check connectivity
    # give the restarted daemon a moment to come up before probing it
    time.sleep(4)

    result = rpc.call_version([node])[node]
    if result:
      if constants.PROTOCOL_VERSION == result:
        logger.Info("communication to node %s fine, sw version %s match" %
                    (node, result))
      else:
        raise errors.OpExecError("Version mismatch master version %s,"
                                 " node version %s" %
                                 (constants.PROTOCOL_VERSION, result))
    else:
      raise errors.OpExecError("Cannot get version from the new node")

    # setup ssh on node
    logger.Info("copy ssh key to node %s" % node)
    priv_key, pub_key, _ = ssh.GetUserFiles(constants.GANETI_RUNAS)
    keyarray = []
    # host dsa/rsa key pairs plus the cluster user's key pair, in the
    # exact order expected by the node_add RPC below
    keyfiles = [constants.SSH_HOST_DSA_PRIV, constants.SSH_HOST_DSA_PUB,
                constants.SSH_HOST_RSA_PRIV, constants.SSH_HOST_RSA_PUB,
                priv_key, pub_key]

    for i in keyfiles:
      f = open(i, 'r')
      try:
        keyarray.append(f.read())
      finally:
        f.close()

    result = rpc.call_node_add(node, keyarray[0], keyarray[1], keyarray[2],
                               keyarray[3], keyarray[4], keyarray[5])

    if not result:
      raise errors.OpExecError("Cannot transfer ssh keys to the new node")

    # Add node to our /etc/hosts, and add key to known_hosts
    _UpdateEtcHosts(new_node.name, new_node.primary_ip)
    _UpdateKnownHosts(new_node.name, new_node.primary_ip,
                      self.cfg.GetHostKey())

    # for dual-homed nodes, verify the node itself can reach its own
    # secondary address
    if new_node.secondary_ip != new_node.primary_ip:
      if not rpc.call_node_tcp_ping(new_node.name,
                                    constants.LOCALHOST_IP_ADDRESS,
                                    new_node.secondary_ip,
                                    constants.DEFAULT_NODED_PORT,
                                    10, False):
        raise errors.OpExecError("Node claims it doesn't have the"
                                 " secondary ip you gave (%s).\n"
                                 "Please fix and re-run this command." %
                                 new_node.secondary_ip)

    success, msg = ssh.VerifyNodeHostname(node)
    if not success:
      raise errors.OpExecError("Node '%s' claims it has a different hostname"
                               " than the one the resolver gives: %s.\n"
                               "Please fix and re-run this command." %
                               (node, msg))

    # Distribute updated /etc/hosts and known_hosts to all nodes,
    # including the node just added
    myself = self.cfg.GetNodeInfo(self.sstore.GetMasterNode())
    dist_nodes = self.cfg.GetNodeList() + [node]
    if myself.name in dist_nodes:
      dist_nodes.remove(myself.name)

    logger.Debug("Copying hosts and known_hosts to all nodes")
    for fname in ("/etc/hosts", constants.SSH_KNOWN_HOSTS_FILE):
      result = rpc.call_upload_file(dist_nodes, fname)
      for to_node in dist_nodes:
        if not result[to_node]:
          # best-effort distribution: log and continue
          logger.Error("copy of file %s to node %s failed" %
                       (fname, to_node))

    to_copy = ss.GetFileList()
    for fname in to_copy:
      if not ssh.CopyFileToNode(node, fname):
        logger.Error("could not copy file %s to node %s" % (fname, node))

    logger.Info("adding node %s to cluster.conf" % node)
    self.cfg.AddNode(new_node)
1581 a8083063 Iustin Pop
1582 a8083063 Iustin Pop
1583 a8083063 Iustin Pop
class LUMasterFailover(LogicalUnit):
  """Failover the master node to the current node.

  This is a special LU in that it must run on a non-master node.

  """
  HPATH = "master-failover"
  HTYPE = constants.HTYPE_CLUSTER
  # must be runnable while the current master is (possibly) down
  REQ_MASTER = False
  _OP_REQP = []

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the new master only in the pre phase, and on all
    the nodes in the post phase.

    """
    env = {
      "OP_TARGET": self.new_master,
      "NEW_MASTER": self.new_master,
      "OLD_MASTER": self.old_master,
      }
    return env, [self.new_master], self.cfg.GetNodeList()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that we are not already the master.

    """
    # the invoking node becomes the new master
    self.new_master = utils.HostInfo().name
    self.old_master = self.sstore.GetMasterNode()

    if self.old_master == self.new_master:
      raise errors.OpPrereqError("This commands must be run on the node"
                                 " where you want the new master to be.\n"
                                 "%s is already the master" %
                                 self.old_master)

  def Exec(self, feedback_fn):
    """Failover the master node.

    This command, when run on a non-master node, will cause the current
    master to cease being master, and the non-master to become new
    master.

    """
    #TODO: do not rely on gethostname returning the FQDN
    logger.Info("setting master to %s, old master: %s" %
                (self.new_master, self.old_master))

    # step 1: demote the old master (best effort; it may be down)
    if not rpc.call_node_stop_master(self.old_master):
      logger.Error("could disable the master role on the old master"
                   " %s, please disable manually" % self.old_master)

    # step 2: record the new master in the simple store and push the
    # updated file to every node so they agree on who is master
    ss = self.sstore
    ss.SetKey(ss.SS_MASTER_NODE, self.new_master)
    if not rpc.call_upload_file(self.cfg.GetNodeList(),
                                ss.KeyToFilename(ss.SS_MASTER_NODE)):
      logger.Error("could not distribute the new simple store master file"
                   " to the other nodes, please check.")

    # step 3: activate the master role (and IP) on the new master
    if not rpc.call_node_start_master(self.new_master):
      logger.Error("could not start the master role on the new master"
                   " %s, please check" % self.new_master)
      feedback_fn("Error in activating the master IP on the new master,\n"
                  "please fix manually.")
1651 a8083063 Iustin Pop
1652 a8083063 Iustin Pop
1653 a8083063 Iustin Pop
1654 a8083063 Iustin Pop
class LUQueryClusterInfo(NoHooksLU):
  """Query cluster configuration.

  """
  _OP_REQP = []
  REQ_MASTER = False

  def CheckPrereq(self):
    """No prerequisites to check for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return the cluster configuration as a dict.

    """
    return {
      "name": self.sstore.GetClusterName(),
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": constants.OS_API_VERSION,
      "export_version": constants.EXPORT_VERSION,
      "master": self.sstore.GetMasterNode(),
      "architecture": (platform.architecture()[0], platform.machine()),
      }
1683 a8083063 Iustin Pop
1684 a8083063 Iustin Pop
1685 a8083063 Iustin Pop
class LUClusterCopyFile(NoHooksLU):
  """Copy file to cluster.

  """
  _OP_REQP = ["nodes", "filename"]

  def CheckPrereq(self):
    """Check prerequisites.

    It should check that the named file exists and that the given list
    of nodes is valid.

    """
    if not os.path.exists(self.op.filename):
      raise errors.OpPrereqError("No such filename '%s'" % self.op.filename)

    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Copy a file from master to some nodes.

    Args:
      opts - class with options as members
      args - list containing a single element, the file name
    Opts used:
      nodes - list containing the name of target nodes; if empty, all nodes

    """
    file_name = self.op.filename
    my_name = utils.HostInfo().name

    for node in self.nodes:
      # no need to copy the file onto ourselves
      if node == my_name:
        continue
      if not ssh.CopyFileToNode(node, file_name):
        logger.Error("Copy of file %s to node %s failed" % (file_name, node))
1722 a8083063 Iustin Pop
1723 a8083063 Iustin Pop
1724 a8083063 Iustin Pop
class LUDumpClusterConfig(NoHooksLU):
  """Return a text-representation of the cluster-config.

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Nothing to check for this LU.

    """
    pass

  def Exec(self, feedback_fn):
    """Return a dump of the cluster configuration.

    """
    # delegate entirely to the configuration object
    return self.cfg.DumpConfig()
1741 a8083063 Iustin Pop
1742 a8083063 Iustin Pop
1743 a8083063 Iustin Pop
class LURunClusterCommand(NoHooksLU):
  """Run a command on some nodes.

  """
  _OP_REQP = ["command", "nodes"]

  def CheckPrereq(self):
    """Check prerequisites.

    It checks that the given list of nodes is valid.

    """
    self.nodes = _GetWantedNodes(self, self.op.nodes)

  def Exec(self, feedback_fn):
    """Run the command on each selected node via ssh.

    Returns a list of (node, output, exit_code) tuples.

    """
    results = []
    for node in self.nodes:
      ssh_result = ssh.SSHCall(node, "root", self.op.command)
      results.append((node, ssh_result.output, ssh_result.exit_code))

    return results
1767 a8083063 Iustin Pop
1768 a8083063 Iustin Pop
1769 a8083063 Iustin Pop
class LUActivateInstanceDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance


  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = _AssembleInstanceDisks(self.instance, self.cfg)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    return disks_info
1798 a8083063 Iustin Pop
1799 a8083063 Iustin Pop
1800 a8083063 Iustin Pop
def _AssembleInstanceDisks(instance, cfg, ignore_secondaries=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  Args:
    instance: a ganeti.objects.Instance object
    cfg: the cluster configuration object (used to set disk IDs)
    ignore_secondaries: if true, errors on secondary nodes won't result
                        in an error return from the function

  Returns:
    a tuple (disks_ok, device_info), where disks_ok is False if any
    required device failed to assemble, and device_info is a list of
    (primary_node, instance_visible_name, assemble_result) tuples, one
    per instance disk, mapping node devices to instance devices

  """
  device_info = []
  disks_ok = True
  for inst_disk in instance.disks:
    # master_result collects the assemble result on the primary node
    master_result = None
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(node_disk, node)
      is_primary = node == instance.primary_node
      result = rpc.call_blockdev_assemble(node, node_disk,
                                          instance.name, is_primary)
      if not result:
        logger.Error("could not prepare block device %s on node %s (is_pri"
                     "mary=%s)" % (inst_disk.iv_name, node, is_primary))
        # secondary-node failures are only tolerated when requested
        if is_primary or not ignore_secondaries:
          disks_ok = False
      if is_primary:
        master_result = result
    device_info.append((instance.primary_node, inst_disk.iv_name,
                        master_result))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in instance.disks:
    cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info
1841 a8083063 Iustin Pop
1842 a8083063 Iustin Pop
1843 fe7b0351 Michael Hanselmann
def _StartInstanceDisks(cfg, instance, force):
  """Start the disks of an instance.

  On assembly failure the already-assembled disks are shut down again
  and an OpExecError is raised.

  """
  disks_ok, dummy = _AssembleInstanceDisks(instance, cfg,
                                           ignore_secondaries=force)
  if disks_ok:
    return
  # roll back whatever was assembled before reporting the failure
  _ShutdownInstanceDisks(instance, cfg)
  if force is not None and not force:
    logger.Error("If the message above refers to a secondary node,"
                 " you can retry the operation using '--force'.")
  raise errors.OpExecError("Disk consistency error")
1855 fe7b0351 Michael Hanselmann
1856 fe7b0351 Michael Hanselmann
1857 a8083063 Iustin Pop
class LUDeactivateInstanceDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Deactivate the disks

    Refuses to act if the instance is still running on its primary
    node, since shutting down the disks under a live instance would
    corrupt it.

    """
    instance = self.instance
    ins_l = rpc.call_instance_list([instance.primary_node])
    ins_l = ins_l[instance.primary_node]
    # a non-list result means the rpc call to the node failed
    if not isinstance(ins_l, list):
      raise errors.OpExecError("Can't contact node '%s'" %
                               instance.primary_node)

    if self.instance.name in ins_l:
      raise errors.OpExecError("Instance is running, can't shutdown"
                               " block devices.")

    _ShutdownInstanceDisks(instance, self.cfg)
1892 a8083063 Iustin Pop
1893 a8083063 Iustin Pop
1894 a8083063 Iustin Pop
def _ShutdownInstanceDisks(instance, cfg, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is true, errors on the primary node do not affect
  the return value; errors on secondary nodes, and on the primary node
  when ignore_primary is false, make the function return False.

  Returns:
    True if all (non-ignored) shutdowns succeeded, False otherwise

  """
  result = True
  for disk in instance.disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(top_disk, node)
      if not rpc.call_blockdev_shutdown(node, top_disk):
        logger.Error("could not shutdown block device %s on node %s" %
                     (disk.iv_name, node))
        # secondary errors always count; primary errors only when
        # ignore_primary is false
        if not ignore_primary or node != instance.primary_node:
          result = False
  return result
1913 a8083063 Iustin Pop
1914 a8083063 Iustin Pop
1915 a8083063 Iustin Pop
class LUStartupInstance(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "force"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    instance = self.instance
    force = self.op.force
    extra_args = getattr(self.op, "extra_args", "")
    node_current = instance.primary_node

    # make sure the target node has enough free memory for the instance
    nodeinfo = rpc.call_node_info([node_current], self.cfg.GetVGName())
    if not nodeinfo:
      raise errors.OpExecError("Could not contact node %s for infos" %
                               (node_current))

    free_memory = nodeinfo[node_current]['memory_free']
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to start instance"
                               " %s on node %s"
                               " needed %s MiB, available %s MiB" %
                               (instance.name, node_current, memory,
                                free_memory))

    _StartInstanceDisks(self.cfg, instance, force)

    if not rpc.call_instance_start(node_current, instance, extra_args):
      # roll back the disk activation on start failure
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance")

    self.cfg.MarkInstanceUp(instance.name)
1986 a8083063 Iustin Pop
1987 a8083063 Iustin Pop
1988 bf6929a2 Alexander Schreiber
class LURebootInstance(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_secondaries", "reboot_type"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # check bridges existence
    _CheckInstanceBridgesExist(instance)

    self.instance = instance
    self.op.instance_name = instance.name

  def Exec(self, feedback_fn):
    """Reboot the instance.

    Soft and hard reboots are delegated to the primary node via one
    rpc call; a full reboot stops the instance and its disks, then
    brings both up again from the master.

    """
    instance = self.instance
    ignore_secondaries = self.op.ignore_secondaries
    reboot_type = self.op.reboot_type
    extra_args = getattr(self.op, "extra_args", "")

    node_current = instance.primary_node

    # NOTE(review): this validation runs only in Exec, after hooks; it
    # would arguably belong in CheckPrereq — confirm before moving, as
    # that would change the raised exception's timing and type
    if reboot_type not in [constants.INSTANCE_REBOOT_SOFT,
                           constants.INSTANCE_REBOOT_HARD,
                           constants.INSTANCE_REBOOT_FULL]:
      raise errors.ParameterError("reboot type not in [%s, %s, %s]" %
                                  (constants.INSTANCE_REBOOT_SOFT,
                                   constants.INSTANCE_REBOOT_HARD,
                                   constants.INSTANCE_REBOOT_FULL))

    if reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                       constants.INSTANCE_REBOOT_HARD]:
      # soft/hard reboot is handled entirely on the primary node
      if not rpc.call_instance_reboot(node_current, instance,
                                      reboot_type, extra_args):
        raise errors.OpExecError("Could not reboot instance")
    else:
      # full reboot: shutdown, deactivate disks, reactivate, restart;
      # on start failure the disks are shut down again
      if not rpc.call_instance_shutdown(node_current, instance):
        raise errors.OpExecError("could not shutdown instance for full reboot")
      _ShutdownInstanceDisks(instance, self.cfg)
      _StartInstanceDisks(self.cfg, instance, ignore_secondaries)
      if not rpc.call_instance_start(node_current, instance, extra_args):
        _ShutdownInstanceDisks(instance, self.cfg)
        raise errors.OpExecError("Could not start instance for full reboot")

    self.cfg.MarkInstanceUp(instance.name)
2062 bf6929a2 Alexander Schreiber
2063 bf6929a2 Alexander Schreiber
2064 a8083063 Iustin Pop
class LUShutdownInstance(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node]
    nl.extend(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    expanded_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(expanded_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    instance = self.instance
    primary_node = instance.primary_node
    if not rpc.call_instance_shutdown(primary_node, instance):
      logger.Error("could not shutdown instance")

    # the instance is marked down and its disks shut down even if the
    # shutdown rpc reported a failure
    self.cfg.MarkInstanceDown(instance.name)
    _ShutdownInstanceDisks(instance, self.cfg)
2107 a8083063 Iustin Pop
2108 a8083063 Iustin Pop
2109 fe7b0351 Michael Hanselmann
class LUReinstallInstance(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # the config can be stale, so double-check against the live state
    # on the primary node
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))

    self.op.os_type = getattr(self.op, "os_type", None)
    if self.op.os_type is not None:
      # OS verification
      pnode = self.cfg.GetNodeInfo(
        self.cfg.ExpandNodeName(instance.primary_node))
      if pnode is None:
        # this opcode has no 'pnode' attribute, so the error message
        # must use the instance's primary node, not self.op.pnode
        raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                   instance.primary_node)
      os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
      if not os_obj:
        raise errors.OpPrereqError("OS '%s' not in supported OS list for"
                                   " primary node"  % self.op.os_type)

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    inst = self.instance

    if self.op.os_type is not None:
      # persist the OS change before running the create scripts
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      inst.os = self.op.os_type
      self.cfg.AddInstance(inst)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      if not rpc.call_instance_os_add(inst.primary_node, inst, "sda", "sdb"):
        raise errors.OpExecError("Could not install OS for instance %s "
                                 "on node %s" %
                                 (inst.name, inst.primary_node))
    finally:
      # always deactivate the disks again, even if the install failed
      _ShutdownInstanceDisks(inst, self.cfg)
2186 fe7b0351 Michael Hanselmann
2187 fe7b0351 Michael Hanselmann
2188 decd5f45 Iustin Pop
class LURenameInstance(LogicalUnit):
  """Rename an instance.

  The instance must be stopped; the rename is done in the cluster
  configuration first and the OS rename script is then run on the
  primary node (a script failure is only logged, not fatal).

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "new_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    nl = ([self.sstore.GetMasterNode(), self.instance.primary_node] +
          list(self.instance.secondary_nodes))
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running,
    and that the new name resolves and its IP is not already in use
    (unless ignore_ip is set on the opcode).

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    if instance.status != "down":
      raise errors.OpPrereqError("Instance '%s' is marked to be up" %
                                 self.op.instance_name)
    # double-check with the node: config may say down while it still runs
    remote_info = rpc.call_instance_info(instance.primary_node, instance.name)
    if remote_info:
      raise errors.OpPrereqError("Instance '%s' is running on the node %s" %
                                 (self.op.instance_name,
                                  instance.primary_node))
    self.instance = instance

    # new name verification
    name_info = utils.HostInfo(self.op.new_name)

    self.op.new_name = new_name = name_info.name
    if not getattr(self.op, "ignore_ip", False):
      # best-effort liveness probe of the new name's IP: if fping
      # succeeds the address is already in use
      command = ["fping", "-q", name_info.ip]
      result = utils.RunCmd(command)
      if not result.failed:
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (name_info.ip, new_name))

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    _StartInstanceDisks(self.cfg, inst, None)
    try:
      if not rpc.call_instance_run_rename(inst.primary_node, inst, old_name,
                                          "sda", "sdb"):
        # fixed message: was "Could run OS rename script"
        msg = ("Could not run OS rename script for instance %s\n"
               "on node %s\n"
               "(but the instance has been renamed in Ganeti)" %
               (inst.name, inst.primary_node))
        logger.Error(msg)
    finally:
      _ShutdownInstanceDisks(inst, self.cfg)
2264 decd5f45 Iustin Pop
2265 decd5f45 Iustin Pop
2266 a8083063 Iustin Pop
class LURemoveInstance(LogicalUnit):
  """Remove an instance.

  The instance is shut down, its disks removed and finally deleted
  from the cluster configuration; with ignore_failures set, shutdown
  and disk-removal errors only produce warnings.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = _BuildInstanceHookEnvByObject(self.instance)
    nl = [self.sstore.GetMasterNode()]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance
    # "ignore_failures" is read in Exec() but is not a required opcode
    # field; default it here so that opcodes built without it don't
    # crash with AttributeError halfway through the removal
    self.op.ignore_failures = getattr(self.op, "ignore_failures", False)

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logger.Info("shutting down instance %s on node %s" %
                (instance.name, instance.primary_node))

    if not rpc.call_instance_shutdown(instance.primary_node, instance):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance")
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, instance.primary_node))

    logger.Info("removing block devices for instance %s" % instance.name)

    if not _RemoveDisks(instance, self.cfg):
      if self.op.ignore_failures:
        feedback_fn("Warning: can't remove instance's disks")
      else:
        raise errors.OpExecError("Can't remove instance's disks")

    logger.Info("removing instance %s out of cluster config" % instance.name)

    self.cfg.RemoveInstance(instance.name)
2323 a8083063 Iustin Pop
2324 a8083063 Iustin Pop
2325 a8083063 Iustin Pop
class LUQueryInstances(NoHooksLU):
  """Logical unit for querying instances.

  No hooks are run for queries; CheckPrereq validates the requested
  output fields and Exec gathers the (possibly live) data.

  """
  _OP_REQP = ["output_fields", "names"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the fields required are valid output fields.

    """
    # fields whose values must be fetched live from the nodes, as
    # opposed to the static ones read from the configuration
    self.dynamic_fields = frozenset(["oper_state", "oper_ram"])
    _CheckOutputFields(static=["name", "os", "pnode", "snodes",
                               "admin_state", "admin_ram",
                               "disk_template", "ip", "mac", "bridge",
                               "sda_size", "sdb_size"],
                       dynamic=self.dynamic_fields,
                       selected=self.op.output_fields)

    # NOTE(review): relying on _GetWantedInstances for the exact
    # semantics of op.names (presumably empty means "all instances")
    # -- confirm against its definition
    self.wanted = _GetWantedInstances(self, self.op.names)

  def Exec(self, feedback_fn):
    """Computes the list of nodes and their attributes.

    Returns a list of rows, one per instance, each row holding the
    values of self.op.output_fields in order.

    """
    instance_names = self.wanted
    instance_list = [self.cfg.GetInstanceInfo(iname) for iname
                     in instance_names]

    # begin data gathering

    nodes = frozenset([inst.primary_node for inst in instance_list])

    bad_nodes = []
    if self.dynamic_fields.intersection(self.op.output_fields):
      # at least one live field was requested: query all primary nodes
      live_data = {}
      node_data = rpc.call_all_instances_info(nodes)
      for name in nodes:
        result = node_data[name]
        if result:
          live_data.update(result)
        elif result == False:
          # explicit RPC failure for this node; its instances will
          # report None for the dynamic fields below
          bad_nodes.append(name)
        # else no instance is alive
    else:
      # no live data needed; pretend every instance reported nothing
      live_data = dict([(name, {}) for name in instance_names])

    # end data gathering

    output = []
    for instance in instance_list:
      iout = []
      for field in self.op.output_fields:
        if field == "name":
          val = instance.name
        elif field == "os":
          val = instance.os
        elif field == "pnode":
          val = instance.primary_node
        elif field == "snodes":
          val = list(instance.secondary_nodes)
        elif field == "admin_state":
          # True when the instance is configured to be running
          val = (instance.status != "down")
        elif field == "oper_state":
          if instance.primary_node in bad_nodes:
            val = None          # unknown: the node couldn't be queried
          else:
            val = bool(live_data.get(instance.name))
        elif field == "admin_ram":
          val = instance.memory
        elif field == "oper_ram":
          if instance.primary_node in bad_nodes:
            val = None
          elif instance.name in live_data:
            val = live_data[instance.name].get("memory", "?")
          else:
            val = "-"           # node answered but instance isn't running
        elif field == "disk_template":
          val = instance.disk_template
        elif field == "ip":
          val = instance.nics[0].ip
        elif field == "bridge":
          val = instance.nics[0].bridge
        elif field == "mac":
          val = instance.nics[0].mac
        elif field == "sda_size" or field == "sdb_size":
          # field[:3] is "sda"/"sdb", the iv_name of the disk
          disk = instance.FindDisk(field[:3])
          if disk is None:
            val = None
          else:
            val = disk.size
        else:
          raise errors.ParameterError(field)
        iout.append(val)
      output.append(iout)

    return output
2423 a8083063 Iustin Pop
2424 a8083063 Iustin Pop
2425 a8083063 Iustin Pop
class LUFailoverInstance(LogicalUnit):
  """Failover an instance.

  The instance is stopped on its primary node, the configuration is
  switched to point at the (single) secondary node, and the instance
  is started there.  Only network-mirrored disk templates qualify.

  """
  HPATH = "instance-failover"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "ignore_consistency"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_CONSISTENCY": self.op.ignore_consistency,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode()] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, uses a mirrored
    disk template, and that the secondary node has enough free memory
    and all the bridges the instance's NICs need.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)

    # only network-mirrored templates have a usable copy on the secondary
    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored, cannot failover.")

    secondary_nodes = instance.secondary_nodes
    if not secondary_nodes:
      raise errors.ProgrammerError("no secondary node but using "
                                   "DT_REMOTE_RAID1 template")

    # check memory requirements on the secondary node
    target_node = secondary_nodes[0]
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())
    info = nodeinfo.get(target_node, None)
    if not info:
      raise errors.OpPrereqError("Cannot get current information"
                                 " from node '%s'" % nodeinfo)
    if instance.memory > info['memory_free']:
      raise errors.OpPrereqError("Not enough memory on target node %s."
                                 " %d MB available, %d MB required" %
                                 (target_node, info['memory_free'],
                                  instance.memory))

    # check bridge existance
    brlist = [nic.bridge for nic in instance.nics]
    if not rpc.call_bridges_exist(target_node, brlist):
      raise errors.OpPrereqError("One or more target bridges %s does not"
                                 " exist on destination node '%s'" %
                                 (brlist, target_node))

    self.instance = instance

  def Exec(self, feedback_fn):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    The order of operations matters: disks are checked and the
    instance shut down on the source before the configuration is
    switched and disks/instance are brought up on the target.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = instance.secondary_nodes[0]

    feedback_fn("* checking disk consistency between source and target")
    for dev in instance.disks:
      # for remote_raid1, these are md over drbd
      if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
        if not self.op.ignore_consistency:
          raise errors.OpExecError("Disk %s is degraded on target node,"
                                   " aborting failover." % dev.iv_name)

    feedback_fn("* checking target node resource availability")
    # NOTE(review): memory was already checked in CheckPrereq; this
    # re-checks at execution time since free memory may have changed
    nodeinfo = rpc.call_node_info([target_node], self.cfg.GetVGName())

    if not nodeinfo:
      raise errors.OpExecError("Could not contact target node %s." %
                               target_node)

    free_memory = int(nodeinfo[target_node]['memory_free'])
    memory = instance.memory
    if memory > free_memory:
      raise errors.OpExecError("Not enough memory to create instance %s on"
                               " node %s. needed %s MiB, available %s MiB" %
                               (instance.name, target_node, memory,
                                free_memory))

    feedback_fn("* shutting down instance on source node")
    logger.Info("Shutting down instance %s on node %s" %
                (instance.name, source_node))

    if not rpc.call_instance_shutdown(source_node, instance):
      if self.op.ignore_consistency:
        # the user asked to proceed even if the source can't shut down
        logger.Error("Could not shutdown instance %s on node %s. Proceeding"
                     " anyway. Please make sure node %s is down"  %
                     (instance.name, source_node, source_node))
      else:
        raise errors.OpExecError("Could not shutdown instance %s on node %s" %
                                 (instance.name, source_node))

    feedback_fn("* deactivating the instance's disks on source node")
    if not _ShutdownInstanceDisks(instance, self.cfg, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks.")

    instance.primary_node = target_node
    # distribute new instance config to the other nodes
    self.cfg.AddInstance(instance)

    feedback_fn("* activating the instance's disks on target node")
    logger.Info("Starting instance %s on node %s" %
                (instance.name, target_node))

    disks_ok, dummy = _AssembleInstanceDisks(instance, self.cfg,
                                             ignore_secondaries=True)
    if not disks_ok:
      # roll back the partial disk activation before bailing out
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Can't activate the instance's disks")

    feedback_fn("* starting the instance on the target node")
    if not rpc.call_instance_start(target_node, instance, None):
      _ShutdownInstanceDisks(instance, self.cfg)
      raise errors.OpExecError("Could not start instance %s on node %s." %
                               (instance.name, target_node))
2560 a8083063 Iustin Pop
2561 a8083063 Iustin Pop
2562 3f78eef2 Iustin Pop
def _CreateBlockDevOnPrimary(cfg, node, instance, device, info):
  """Recursively create a block device tree on the primary node.

  All children are created unconditionally, depth-first, before the
  device itself.  Returns True on success, False on the first failure.

  """
  for child in (device.children or []):
    if not _CreateBlockDevOnPrimary(cfg, node, instance, child, info):
      return False

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, True, info)
  if not result:
    return False
  # remember the id assigned at creation time, unless one is already set
  if device.physical_id is None:
    device.physical_id = result
  return True
2581 a8083063 Iustin Pop
2582 a8083063 Iustin Pop
2583 3f78eef2 Iustin Pop
def _CreateBlockDevOnSecondary(cfg, node, instance, device, force, info):
  """Create a tree of block devices on a secondary node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  """
  # once any ancestor must exist on the secondary, so must all descendants
  if device.CreateOnSecondary():
    force = True

  for child in (device.children or []):
    if not _CreateBlockDevOnSecondary(cfg, node, instance,
                                      child, force, info):
      return False

  if not force:
    # nothing to create at this level
    return True

  cfg.SetDiskID(device, node)
  result = rpc.call_blockdev_create(node, device, device.size,
                                    instance.name, False, info)
  if not result:
    return False
  # remember the id assigned at creation time, unless one is already set
  if device.physical_id is None:
    device.physical_id = result
  return True
2610 a8083063 Iustin Pop
2611 a8083063 Iustin Pop
2612 923b1523 Iustin Pop
def _GenerateUniqueNames(cfg, exts):
2613 923b1523 Iustin Pop
  """Generate a suitable LV name.
2614 923b1523 Iustin Pop

2615 923b1523 Iustin Pop
  This will generate a logical volume name for the given instance.
2616 923b1523 Iustin Pop

2617 923b1523 Iustin Pop
  """
2618 923b1523 Iustin Pop
  results = []
2619 923b1523 Iustin Pop
  for val in exts:
2620 923b1523 Iustin Pop
    new_id = cfg.GenerateUniqueID()
2621 923b1523 Iustin Pop
    results.append("%s%s" % (new_id, val))
2622 923b1523 Iustin Pop
  return results
2623 923b1523 Iustin Pop
2624 923b1523 Iustin Pop
2625 923b1523 Iustin Pop
def _GenerateMDDRBDBranch(cfg, primary, secondary, size, names):
  """Generate a drbd (0.7) device with its two backing LVs.

  The branch consists of a data LV of the requested size plus a fixed
  128 MiB metadata LV, mirrored between the primary and secondary
  nodes over a freshly allocated DRBD port.

  """
  drbd_port = cfg.AllocatePort()
  vg = cfg.GetVGName()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vg, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD7, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_dev, meta_dev])
2639 a8083063 Iustin Pop
2640 a8083063 Iustin Pop
2641 a1f445d3 Iustin Pop
def _GenerateDRBD8Branch(cfg, primary, secondary, size, names, iv_name):
  """Generate a drbd8 device with its two backing LVs.

  Like _GenerateMDDRBDBranch, but builds an LD_DRBD8 device carrying
  the given iv_name (the instance-visible name, e.g. "sda").

  """
  drbd_port = cfg.AllocatePort()
  vg = cfg.GetVGName()
  data_dev = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vg, names[0]))
  meta_dev = objects.Disk(dev_type=constants.LD_LV, size=128,
                          logical_id=(vg, names[1]))
  return objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                      logical_id=(primary, secondary, drbd_port),
                      children=[data_dev, meta_dev],
                      iv_name=iv_name)
2656 a1f445d3 Iustin Pop
2657 923b1523 Iustin Pop
def _GenerateDiskTemplate(cfg, template_name,
                          instance_name, primary_node,
                          secondary_nodes, disk_sz, swap_sz):
  """Generate the entire disk layout for a given template type.

  Args:
    cfg: the configuration object
    template_name: the disk template, one of the constants.DT_* values
    instance_name: the name of the instance (currently unused)
    primary_node: the primary node of the instance
    secondary_nodes: the list of secondary nodes (empty or exactly one,
      depending on the template)
    disk_sz: size of the data disk ('sda'), in MiB
    swap_sz: size of the swap disk ('sdb'), in MiB

  Returns:
    The list of top-level objects.Disk entries for the instance.

  Raises:
    errors.ProgrammerError: if the template name is invalid or the
      secondary node count doesn't match the template.

  """
  #TODO: compute space requirements

  vgname = cfg.GetVGName()
  # use the constants.DT_* names for all templates; the remote_raid1 and
  # drbd8 branches already did, while the first three used raw strings
  if template_name == constants.DT_DISKLESS:
    disks = []
  elif template_name == constants.DT_PLAIN:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    names = _GenerateUniqueNames(cfg, [".sda", ".sdb"])
    sda_dev = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                           logical_id=(vgname, names[0]),
                           iv_name = "sda")
    sdb_dev = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                           logical_id=(vgname, names[1]),
                           iv_name = "sdb")
    disks = [sda_dev, sdb_dev]
  elif template_name == constants.DT_LOCAL_RAID1:
    if len(secondary_nodes) != 0:
      raise errors.ProgrammerError("Wrong template configuration")

    # each of sda/sdb is an md mirror over two local LVs
    names = _GenerateUniqueNames(cfg, [".sda_m1", ".sda_m2",
                                       ".sdb_m1", ".sdb_m2"])
    sda_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[0]))
    sda_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=disk_sz,
                              logical_id=(vgname, names[1]))
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sda",
                              size=disk_sz,
                              children = [sda_dev_m1, sda_dev_m2])
    sdb_dev_m1 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[2]))
    sdb_dev_m2 = objects.Disk(dev_type=constants.LD_LV, size=swap_sz,
                              logical_id=(vgname, names[3]))
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name = "sdb",
                              size=swap_sz,
                              children = [sdb_dev_m1, sdb_dev_m2])
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_REMOTE_RAID1:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2])
    md_sda_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sda",
                              children = [drbd_sda_dev], size=disk_sz)
    drbd_sdb_dev = _GenerateMDDRBDBranch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4])
    md_sdb_dev = objects.Disk(dev_type=constants.LD_MD_R1, iv_name="sdb",
                              children = [drbd_sdb_dev], size=swap_sz)
    disks = [md_sda_dev, md_sdb_dev]
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    names = _GenerateUniqueNames(cfg, [".sda_data", ".sda_meta",
                                       ".sdb_data", ".sdb_meta"])
    drbd_sda_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         disk_sz, names[0:2], "sda")
    drbd_sdb_dev = _GenerateDRBD8Branch(cfg, primary_node, remote_node,
                                         swap_sz, names[2:4], "sdb")
    disks = [drbd_sda_dev, drbd_sdb_dev]
  else:
    raise errors.ProgrammerError("Invalid disk template '%s'" % template_name)
  return disks
2731 a8083063 Iustin Pop
2732 a8083063 Iustin Pop
2733 a0c3fea1 Michael Hanselmann
def _GetInstanceInfoText(instance):
2734 3ecf6786 Iustin Pop
  """Compute that text that should be added to the disk's metadata.
2735 3ecf6786 Iustin Pop

2736 3ecf6786 Iustin Pop
  """
2737 a0c3fea1 Michael Hanselmann
  return "originstname+%s" % instance.name
2738 a0c3fea1 Michael Hanselmann
2739 a0c3fea1 Michael Hanselmann
2740 a8083063 Iustin Pop
def _CreateDisks(cfg, instance):
  """Create every block device of an instance.

  This abstracts away some work from AddInstance.  For each disk, the
  device is first created on all secondary nodes and then on the
  primary; the function stops at the first failure.

  Args:
    instance: the instance object

  Returns:
    True or False showing the success of the creation process

  """
  info = _GetInstanceInfoText(instance)

  for disk in instance.disks:
    logger.Info("creating volume %s for instance %s" %
              (disk.iv_name, instance.name))
    #HARDCODE
    for snode in instance.secondary_nodes:
      created = _CreateBlockDevOnSecondary(cfg, snode, instance,
                                           disk, False, info)
      if not created:
        logger.Error("failed to create volume %s (%s) on secondary node %s!" %
                     (disk.iv_name, disk, snode))
        return False
    #HARDCODE
    created = _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                       instance, disk, info)
    if not created:
      logger.Error("failed to create volume %s on primary!" %
                   disk.iv_name)
      return False
  return True
2771 a8083063 Iustin Pop
2772 a8083063 Iustin Pop
2773 a8083063 Iustin Pop
def _RemoveDisks(instance, cfg):
  """Remove all block devices belonging to an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`.  Unlike `_CreateDisks()`, a failure to remove
  one device does not stop the removal of the remaining ones.

  Args:
    instance: the instance object

  Returns:
    True if every device was removed, False otherwise

  """
  logger.Info("removing block devices for instance %s" % instance.name)

  all_removed = True
  for device in instance.disks:
    for node, disk in device.ComputeNodeTree(instance.primary_node):
      cfg.SetDiskID(disk, node)
      removed = rpc.call_blockdev_remove(node, disk)
      if not removed:
        logger.Error("could not remove block device %s on node %s,"
                     " continuing anyway" %
                     (device.iv_name, node))
        all_removed = False
  return all_removed
2800 a8083063 Iustin Pop
2801 a8083063 Iustin Pop
2802 a8083063 Iustin Pop
class LUCreateInstance(LogicalUnit):
  """Create an instance.

  Supports both fresh creation (INSTANCE_CREATE) and import from a
  previous export (INSTANCE_IMPORT).

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mem_size", "disk_size", "pnode",
              "disk_template", "swap_size", "mode", "start", "vcpus",
              "wait_for_sync", "ip_check"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "INSTANCE_DISK_TEMPLATE": self.op.disk_template,
      "INSTANCE_DISK_SIZE": self.op.disk_size,
      "INSTANCE_SWAP_SIZE": self.op.swap_size,
      "INSTANCE_ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      # import-only variables; self.src_image is computed in CheckPrereq
      env["INSTANCE_SRC_NODE"] = self.op.src_node
      env["INSTANCE_SRC_PATH"] = self.op.src_path
      env["INSTANCE_SRC_IMAGE"] = self.src_image

    env.update(_BuildInstanceHookEnv(name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.instance_status,
      os_type=self.op.os_type,
      memory=self.op.mem_size,
      vcpus=self.op.vcpus,
      nics=[(self.inst_ip, self.op.bridge)],
    ))

    # run the hooks on the master plus all involved nodes
    nl = ([self.sstore.GetMasterNode(), self.op.pnode] +
          self.secondaries)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    Validates the creation mode (and, for imports, the export data),
    the primary/secondary nodes, free disk space, the requested OS,
    the instance name/IP and the target bridge.  Raises OpPrereqError
    on any user-fixable problem.

    """
    if self.op.mode not in (constants.INSTANCE_CREATE,
                            constants.INSTANCE_IMPORT):
      raise errors.OpPrereqError("Invalid instance creation mode '%s'" %
                                 self.op.mode)

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = getattr(self.op, "src_node", None)
      src_path = getattr(self.op, "src_path", None)
      if src_node is None or src_path is None:
        raise errors.OpPrereqError("Importing an instance requires source"
                                   " node and path options")
      src_node_full = self.cfg.ExpandNodeName(src_node)
      if src_node_full is None:
        raise errors.OpPrereqError("Unknown source node '%s'" % src_node)
      self.op.src_node = src_node = src_node_full

      if not os.path.isabs(src_path):
        raise errors.OpPrereqError("The source path must be absolute")

      export_info = rpc.call_export_info(src_node, src_path)

      if not export_info:
        raise errors.OpPrereqError("No export found in dir %s" % src_path)

      if not export_info.has_section(constants.INISECT_EXP):
        raise errors.ProgrammerError("Corrupted export config")

      ei_version = export_info.get(constants.INISECT_EXP, 'version')
      if (int(ei_version) != constants.EXPORT_VERSION):
        raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                   (ei_version, constants.EXPORT_VERSION))

      # only single-disk exports can be imported by this LU
      if int(export_info.get(constants.INISECT_INS, 'disk_count')) > 1:
        raise errors.OpPrereqError("Can't import instance with more than"
                                   " one data disk")

      # FIXME: are the old os-es, disk sizes, etc. useful?
      self.op.os_type = export_info.get(constants.INISECT_EXP, 'os')
      diskimage = os.path.join(src_path, export_info.get(constants.INISECT_INS,
                                                         'disk0_dump'))
      self.src_image = diskimage
    else: # INSTANCE_CREATE
      if getattr(self.op, "os_type", None) is None:
        raise errors.OpPrereqError("No guest OS specified")

    # check primary node
    pnode = self.cfg.GetNodeInfo(self.cfg.ExpandNodeName(self.op.pnode))
    if pnode is None:
      raise errors.OpPrereqError("Primary node '%s' is unknown" %
                                 self.op.pnode)
    self.op.pnode = pnode.name
    self.pnode = pnode
    self.secondaries = []
    # disk template and mirror node verification
    if self.op.disk_template not in constants.DISK_TEMPLATES:
      raise errors.OpPrereqError("Invalid disk template name")

    if self.op.disk_template in constants.DTS_NET_MIRROR:
      if getattr(self.op, "snode", None) is None:
        raise errors.OpPrereqError("The networked disk templates need"
                                   " a mirror node")

      snode_name = self.cfg.ExpandNodeName(self.op.snode)
      if snode_name is None:
        raise errors.OpPrereqError("Unknown secondary node '%s'" %
                                   self.op.snode)
      elif snode_name == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be"
                                   " the primary node.")
      self.secondaries.append(snode_name)

    # Check lv size requirements
    nodenames = [pnode.name] + self.secondaries
    nodeinfo = rpc.call_node_info(nodenames, self.cfg.GetVGName())

    # Required free disk space as a function of disk and swap space
    req_size_dict = {
      constants.DT_DISKLESS: 0,
      constants.DT_PLAIN: self.op.disk_size + self.op.swap_size,
      constants.DT_LOCAL_RAID1: (self.op.disk_size + self.op.swap_size) * 2,
      # 256 MB are added for drbd metadata, 128MB for each drbd device
      constants.DT_REMOTE_RAID1: self.op.disk_size + self.op.swap_size + 256,
      constants.DT_DRBD8: self.op.disk_size + self.op.swap_size + 256,
    }

    if self.op.disk_template not in req_size_dict:
      raise errors.ProgrammerError("Disk template '%s' size requirement"
                                   " is unknown" % self.op.disk_template)

    req_size = req_size_dict[self.op.disk_template]

    for node in nodenames:
      info = nodeinfo.get(node, None)
      if not info:
        # BUGFIX: previously interpolated the whole nodeinfo dict into the
        # message instead of the node name that failed
        raise errors.OpPrereqError("Cannot get current information"
                                   " from node '%s'" % node)
      if req_size > info['vg_free']:
        raise errors.OpPrereqError("Not enough disk space on target node %s."
                                   " %d MB available, %d MB required" %
                                   (node, info['vg_free'], req_size))

    # os verification
    os_obj = rpc.call_os_get(pnode.name, self.op.os_type)
    if not os_obj:
      raise errors.OpPrereqError("OS '%s' not in supported os list for"
                                 " primary node"  % self.op.os_type)

    # instance verification
    hostname1 = utils.HostInfo(self.op.instance_name)

    self.op.instance_name = instance_name = hostname1.name
    instance_list = self.cfg.GetInstanceList()
    if instance_name in instance_list:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name)

    # ip handling: None/"none" means no IP, "auto" resolves the name
    ip = getattr(self.op, "ip", None)
    if ip is None or ip.lower() == "none":
      inst_ip = None
    elif ip.lower() == "auto":
      inst_ip = hostname1.ip
    else:
      if not utils.IsValidIP(ip):
        raise errors.OpPrereqError("given IP address '%s' doesn't look"
                                   " like a valid IP" % ip)
      inst_ip = ip
    self.inst_ip = inst_ip

    if self.op.start and not self.op.ip_check:
      raise errors.OpPrereqError("Cannot ignore IP address conflicts when"
                                 " adding an instance in start mode")

    if self.op.ip_check:
      # a reachable IP means the address is already taken by another host
      if utils.TcpPing(utils.HostInfo().name, hostname1.ip,
                       constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname1.ip, instance_name))

    # bridge verification
    bridge = getattr(self.op, "bridge", None)
    if bridge is None:
      self.op.bridge = self.cfg.GetDefBridge()
    else:
      self.op.bridge = bridge

    if not rpc.call_bridges_exist(self.pnode.name, [self.op.bridge]):
      raise errors.OpPrereqError("target bridge '%s' does not exist on"
                                 " destination node '%s'" %
                                 (self.op.bridge, pnode.name))

    if self.op.start:
      self.instance_status = 'up'
    else:
      self.instance_status = 'down'

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    Generates the disk layout, creates the block devices, registers
    the instance in the configuration, waits for (or checks) disk
    sync, installs/imports the OS and optionally starts the instance.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    nic = objects.NIC(bridge=self.op.bridge, mac=self.cfg.GenerateMAC())
    if self.inst_ip is not None:
      nic.ip = self.inst_ip

    disks = _GenerateDiskTemplate(self.cfg,
                                  self.op.disk_template,
                                  instance, pnode_name,
                                  self.secondaries, self.op.disk_size,
                                  self.op.swap_size)

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            memory=self.op.mem_size,
                            vcpus=self.op.vcpus,
                            nics=[nic], disks=disks,
                            disk_template=self.op.disk_template,
                            status=self.instance_status,
                            )

    feedback_fn("* creating instance disks...")
    if not _CreateDisks(self.cfg, iobj):
      # roll back any devices that were created before the failure
      _RemoveDisks(iobj, self.cfg)
      raise errors.OpExecError("Device creation failed, reverting...")

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj)

    if self.op.wait_for_sync:
      disk_abort = not _WaitForSync(self.cfg, iobj)
    elif iobj.disk_template in constants.DTS_NET_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      time.sleep(15)
      feedback_fn("* checking mirrors status")
      disk_abort = not _WaitForSync(self.cfg, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      # degraded disks: remove both the devices and the config entry
      _RemoveDisks(iobj, self.cfg)
      self.cfg.RemoveInstance(iobj.name)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    feedback_fn("creating os for instance %s on node %s" %
                (instance, pnode_name))

    if iobj.disk_template != constants.DT_DISKLESS:
      if self.op.mode == constants.INSTANCE_CREATE:
        feedback_fn("* running the instance OS create scripts...")
        if not rpc.call_instance_os_add(pnode_name, iobj, "sda", "sdb"):
          raise errors.OpExecError("could not add os for instance %s"
                                   " on node %s" %
                                   (instance, pnode_name))

      elif self.op.mode == constants.INSTANCE_IMPORT:
        feedback_fn("* running the instance OS import scripts...")
        src_node = self.op.src_node
        src_image = self.src_image
        if not rpc.call_instance_os_import(pnode_name, iobj, "sda", "sdb",
                                                src_node, src_image):
          raise errors.OpExecError("Could not import os for instance"
                                   " %s on node %s" %
                                   (instance, pnode_name))
      else:
        # also checked in the prereq part
        raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                     % self.op.mode)

    if self.op.start:
      logger.Info("starting instance %s on node %s" % (instance, pnode_name))
      feedback_fn("* starting instance...")
      if not rpc.call_instance_start(pnode_name, iobj, None):
        raise errors.OpExecError("Could not start instance")
3084 a8083063 Iustin Pop
3085 a8083063 Iustin Pop
3086 a8083063 Iustin Pop
class LUConnectConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  _OP_REQP = ["instance_name"]

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    full_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(full_name)
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    instance = self.instance
    node = instance.primary_node

    node_insts = rpc.call_instance_list([node])[node]
    if node_insts is False:
      raise errors.OpExecError("Can't connect to node %s." % node)

    if instance.name not in node_insts:
      raise errors.OpExecError("Instance %s is not running." % instance.name)

    logger.Debug("connecting to console of %s on %s" % (instance.name, node))

    console_cmd = hypervisor.GetHypervisor().\
                  GetShellCommandForConsole(instance.name)
    # build ssh cmdline
    argv = ["ssh", "-q", "-t"]
    argv += list(ssh.KNOWN_HOSTS_OPTS)
    argv += list(ssh.BATCH_MODE_OPTS)
    argv += [node, console_cmd]
    return "ssh", argv
3134 a8083063 Iustin Pop
3135 a8083063 Iustin Pop
3136 a8083063 Iustin Pop
class LUAddMDDRBDComponent(LogicalUnit):
  """Add a new mirror member to an instance's disk.

  Creates a new DRBD device pair (on the primary and a new remote
  node) and attaches it as an additional child of an existing
  remote_raid1 (MD-over-DRBD) disk.

  """
  HPATH = "mirror-add"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "remote_node", "disk_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "NEW_SECONDARY": self.op.remote_node,
      "DISK_NAME": self.op.disk_name,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    # node list: master + primary + new secondary + existing secondaries
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.remote_node,] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, that the target
    node exists and is not the primary, that the disk layout is
    remote_raid1, that the named disk exists and that it does not
    already have two mirror children.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
    if remote_node is None:
      raise errors.OpPrereqError("Node '%s' not known" % self.op.remote_node)
    self.remote_node = remote_node

    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")
    # find the disk whose iv_name matches the requested one; the
    # for/else raises if no disk matched
    for disk in instance.disks:
      if disk.iv_name == self.op.disk_name:
        break
    else:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)
    if len(disk.children) > 1:
      raise errors.OpPrereqError("The device already has two slave"
                                 " devices.\n"
                                 "This would create a 3-disk raid1"
                                 " which we don't allow.")
    self.disk = disk

  def Exec(self, feedback_fn):
    """Add the mirror component.

    Creates the new DRBD branch on the secondary first, then on the
    primary, then asks the primary to attach it to the MD array.  Each
    later step rolls back the devices created by the earlier ones on
    failure.

    """
    disk = self.disk
    instance = self.instance

    remote_node = self.remote_node
    # LV names follow the ".<iv_name>_data" / ".<iv_name>_meta" convention
    lv_names = [".%s_%s" % (disk.iv_name, suf) for suf in ["data", "meta"]]
    names = _GenerateUniqueNames(self.cfg, lv_names)
    new_drbd = _GenerateMDDRBDBranch(self.cfg, instance.primary_node,
                                     remote_node, disk.size, names)

    logger.Info("adding new mirror component on secondary")
    #HARDCODE
    if not _CreateBlockDevOnSecondary(self.cfg, remote_node, instance,
                                      new_drbd, False,
                                      _GetInstanceInfoText(instance)):
      # nothing created yet on the primary, so nothing to roll back
      raise errors.OpExecError("Failed to create new component on secondary"
                               " node %s" % remote_node)

    logger.Info("adding new mirror component on primary")
    #HARDCODE
    if not _CreateBlockDevOnPrimary(self.cfg, instance.primary_node,
                                    instance, new_drbd,
                                    _GetInstanceInfoText(instance)):
      # remove secondary dev
      self.cfg.SetDiskID(new_drbd, remote_node)
      rpc.call_blockdev_remove(remote_node, new_drbd)
      raise errors.OpExecError("Failed to create volume on primary")

    # the device exists now
    # call the primary node to add the mirror to md
    logger.Info("adding new mirror component to md")
    if not rpc.call_blockdev_addchildren(instance.primary_node,
                                         disk, [new_drbd]):
      # attach failed: tear down the new device on both nodes
      # (best-effort; each failure is only logged)
      # NOTE(review): "compoment" typo is in the runtime log string,
      # left as-is here
      logger.Error("Can't add mirror compoment to md!")
      self.cfg.SetDiskID(new_drbd, remote_node)
      if not rpc.call_blockdev_remove(remote_node, new_drbd):
        logger.Error("Can't rollback on secondary")
      self.cfg.SetDiskID(new_drbd, instance.primary_node)
      if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
        logger.Error("Can't rollback on primary")
      raise errors.OpExecError("Can't add mirror component to md array")

    # success: record the new child and persist the configuration
    disk.children.append(new_drbd)

    self.cfg.AddInstance(instance)

    _WaitForSync(self.cfg, instance)

    return 0
3249 a8083063 Iustin Pop
3250 a8083063 Iustin Pop
3251 a8083063 Iustin Pop
class LURemoveMDDRBDComponent(LogicalUnit):
  """Remove one DRBD component from a remote_raid1 (md-over-drbd) disk.

  The opcode names the instance, the md disk (by iv_name) and the drbd
  port (disk_id) identifying which mirror half to drop.

  """
  HPATH = "mirror-remove"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "disk_name", "disk_id"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {}
    env["DISK_NAME"] = self.op.disk_name
    env["DISK_ID"] = self.op.disk_id
    env["OLD_SECONDARY"] = self.old_secondary
    # instance-wide variables are merged in last, as in the other LUs
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    node_list = [self.sstore.GetMasterNode(), self.instance.primary_node]
    node_list.extend(self.instance.secondary_nodes)
    return env, node_list, node_list

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance exists, uses the remote_raid1
    template, and that the named disk has a removable drbd child
    matching the given port.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template != constants.DT_REMOTE_RAID1:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " remote_raid1.")

    # locate the md disk carrying the requested iv_name
    target_disk = None
    for candidate in instance.disks:
      if candidate.iv_name == self.op.disk_name:
        target_disk = candidate
        break
    if target_disk is None:
      raise errors.OpPrereqError("Can't find this device ('%s') in the"
                                 " instance." % self.op.disk_name)

    # locate the drbd child whose port matches disk_id
    target_child = None
    for candidate in target_disk.children:
      if (candidate.dev_type == constants.LD_DRBD7 and
          candidate.logical_id[2] == self.op.disk_id):
        target_child = candidate
        break
    if target_child is None:
      raise errors.OpPrereqError("Can't find the device with this port.")

    if len(target_disk.children) < 2:
      raise errors.OpPrereqError("Cannot remove the last component from"
                                 " a mirror.")
    self.disk = target_disk
    self.child = target_child
    # the drbd logical_id holds (node_a, node_b, port); remember the
    # peer that is not the primary, for the hooks environment
    if self.child.logical_id[0] == instance.primary_node:
      self.old_secondary = self.child.logical_id[1]
    else:
      self.old_secondary = self.child.logical_id[0]

  def Exec(self, feedback_fn):
    """Detach the selected component from the md mirror and delete it.

    """
    inst = self.instance
    mirror = self.disk
    component = self.child
    logger.Info("remove mirror component")
    self.cfg.SetDiskID(mirror, inst.primary_node)
    if not rpc.call_blockdev_removechildren(inst.primary_node,
                                            mirror, [component]):
      raise errors.OpExecError("Can't remove child from mirror.")

    # tear the drbd device down on both of its nodes; failures here are
    # only logged, since the md array no longer references the child
    for node_name in component.logical_id[:2]:
      self.cfg.SetDiskID(component, node_name)
      if not rpc.call_blockdev_remove(node_name, component):
        logger.Error("Warning: failed to remove device from node %s,"
                     " continuing operation." % node_name)

    mirror.children.remove(component)
    self.cfg.AddInstance(inst)
3336 a8083063 Iustin Pop
3337 a8083063 Iustin Pop
3338 a8083063 Iustin Pop
class LUReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  Supports the remote_raid1 template (full replacement only) and the
  drbd8 template (primary-side or secondary-side replacement); the
  actual work is dispatched from Exec() to a per-template handler.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "mode", "disks"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.instance.secondary_nodes[0],
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster, is network
    mirrored with exactly one secondary, and that the requested
    mode/remote_node combination is valid for its disk template.
    Sets self.tgt_node (and self.new_node for secondary replacement)
    for the drbd8 handlers.

    """
    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("Instance '%s' not known" %
                                 self.op.instance_name)
    self.instance = instance

    if instance.disk_template not in constants.DTS_NET_MIRROR:
      raise errors.OpPrereqError("Instance's disk layout is not"
                                 " network mirrored.")

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes))

    self.sec_node = instance.secondary_nodes[0]

    remote_node = getattr(self.op, "remote_node", None)
    if remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
    else:
      self.remote_node_info = None
    if remote_node == instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance.")
    elif remote_node == self.sec_node:
      # the user gave the current secondary, switch to
      # 'no-replace-secondary' mode
      remote_node = None
    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
        self.op.mode != constants.REPLACE_DISK_ALL):
      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
                                 " disks replacement, not individual ones")
    if instance.disk_template == constants.DT_DRBD8:
      if self.op.mode == constants.REPLACE_DISK_ALL:
        raise errors.OpPrereqError("Template 'drbd8' only allows primary or"
                                   " secondary disk replacement, not"
                                   " both at once")
      elif self.op.mode == constants.REPLACE_DISK_PRI:
        if remote_node is not None:
          raise errors.OpPrereqError("Template 'drbd8' does not allow changing"
                                     " the secondary while doing a primary"
                                     " node disk replacement")
        self.tgt_node = instance.primary_node
      elif self.op.mode == constants.REPLACE_DISK_SEC:
        self.new_node = remote_node # this can be None, in which case
                                    # we don't change the secondary
        self.tgt_node = instance.secondary_nodes[0]
      else:
        raise errors.ProgrammerError("Unhandled disk replace mode")

    for name in self.op.disks:
      if instance.FindDisk(name) is None:
        raise errors.OpPrereqError("Disk '%s' not found for instance '%s'" %
                                   (name, instance.name))
    # normalized value (None when keeping the current secondary)
    self.op.remote_node = remote_node

  def _ExecRR1(self, feedback_fn):
    """Replace the disks of a remote_raid1 instance.

    For every disk, a fresh drbd mirror component is created on the
    (possibly new) secondary and the primary, attached to the md
    device, and after sync the old component is detached and removed.

    """
    instance = self.instance
    iv_names = {}
    # start of work
    if self.op.remote_node is None:
      remote_node = self.sec_node
    else:
      remote_node = self.op.remote_node
    cfg = self.cfg
    for dev in instance.disks:
      size = dev.size
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
                                       remote_node, size, names)
      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
      logger.Info("adding new mirror component on secondary for %s" %
                  dev.iv_name)
      #HARDCODE
      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
                                        new_drbd, False,
                                        _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new component on"
                                 " secondary node %s\n"
                                 "Full abort, cleanup manually!" %
                                 remote_node)

      logger.Info("adding new mirror component on primary")
      #HARDCODE
      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
                                      instance, new_drbd,
                                      _GetInstanceInfoText(instance)):
        # remove secondary dev
        cfg.SetDiskID(new_drbd, remote_node)
        rpc.call_blockdev_remove(remote_node, new_drbd)
        raise errors.OpExecError("Failed to create volume on primary!\n"
                                 "Full abort, cleanup manually!!")

      # the device exists now
      # call the primary node to add the mirror to md
      logger.Info("adding new mirror component to md")
      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
                                           [new_drbd]):
        logger.Error("Can't add mirror compoment to md!")
        cfg.SetDiskID(new_drbd, remote_node)
        if not rpc.call_blockdev_remove(remote_node, new_drbd):
          logger.Error("Can't rollback on secondary")
        cfg.SetDiskID(new_drbd, instance.primary_node)
        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
          logger.Error("Can't rollback on primary")
        raise errors.OpExecError("Full abort, cleanup manually!!")

      dev.children.append(new_drbd)
      cfg.AddInstance(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("MD device %s is degraded!" % name)
      cfg.SetDiskID(new_drbd, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
      if is_degr:
        raise errors.OpExecError("New drbd device %s is degraded!" % name)

    # everything healthy: detach and delete the old components
    for name in iv_names:
      dev, child, new_drbd = iv_names[name]
      logger.Info("remove mirror %s component" % name)
      cfg.SetDiskID(dev, instance.primary_node)
      if not rpc.call_blockdev_removechildren(instance.primary_node,
                                              dev, [child]):
        logger.Error("Can't remove child from mirror, aborting"
                     " *this device cleanup*.\nYou need to cleanup manually!!")
        continue

      for node in child.logical_id[:2]:
        logger.Info("remove child device on %s" % node)
        cfg.SetDiskID(child, node)
        if not rpc.call_blockdev_remove(node, child):
          logger.Error("Warning: failed to remove device from node %s,"
                       " continuing operation." % node)

      dev.children.remove(child)

      cfg.AddInstance(instance)

  def _ExecD8DiskOnly(self, feedback_fn):
    """Replace a disk on the primary or secondary for dbrd8.

    The algorithm for replace is quite complicated:
      - for each disk to be replaced:
        - create new LVs on the target node with unique names
        - detach old LVs from the drbd device
        - rename old LVs to name_replaced.<time_t>
        - rename new LVs to old LVs
        - attach the new LVs (with the old names now) to the drbd device
      - wait for sync across all devices
      - for each modified disk:
        - remove old LVs (which have the name name_replaces.<time_t>)

    Failures are not very well handled.

    """
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    tgt_node = self.tgt_node
    for dev in instance.disks:
      if dev.iv_name not in self.op.disks:
        continue
      size = dev.size
      cfg.SetDiskID(dev, tgt_node)
      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
      names = _GenerateUniqueNames(cfg, lv_names)
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                             logical_id=(vgname, names[0]))
      lv_meta = objects.Disk(dev_type=constants.LD_LV, size=128,
                             logical_id=(vgname, names[1]))
      new_lvs = [lv_data, lv_meta]
      old_lvs = dev.children
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      logger.Info("adding new local storage on %s for %s" %
                  (tgt_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in new_lvs:
        if not _CreateBlockDevOnPrimary(cfg, tgt_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], tgt_node))

      if not rpc.call_blockdev_removechildren(tgt_node, dev, old_lvs):
        raise errors.OpExecError("Can't detach drbd from local storage on node"
                                 " %s for device %s" % (tgt_node, dev.iv_name))
      dev.children = []
      cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption than logical_id == physical_id (which in
      # turn is the unique_id on that node)
      temp_suffix = int(time.time())
      logger.Info("renaming the old LVs on the target node")
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)
      rlist = [(disk, ren_fn(disk, temp_suffix)) for disk in old_lvs]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        logger.Error("Can't rename old LVs on node %s" % tgt_node)
        do_change_old = False
      else:
        do_change_old = True
      # now we rename the new LVs to the old LVs
      logger.Info("renaming the new LVs on the target node")
      rlist = [(new, old.physical_id) for old, new in zip(old_lvs, new_lvs)]
      if not rpc.call_blockdev_rename(tgt_node, rlist):
        logger.Error("Can't rename new LVs on node %s" % tgt_node)
      else:
        for old, new in zip(old_lvs, new_lvs):
          new.logical_id = old.logical_id
          cfg.SetDiskID(new, tgt_node)

      if do_change_old:
        for disk in old_lvs:
          disk.logical_id = ren_fn(disk, temp_suffix)
          cfg.SetDiskID(disk, tgt_node)

      # now that the new lvs have the old name, we can add them to the device
      logger.Info("adding new mirror component on %s" % tgt_node)
      if not rpc.call_blockdev_addchildren(tgt_node, dev, new_lvs):
        logger.Error("Can't add local storage to drbd!")
        for new_lv in new_lvs:
          if not rpc.call_blockdev_remove(tgt_node, new_lv):
            # the format argument was previously missing, which logged a
            # literal '%s' instead of the device name
            logger.Error("Can't rollback device %s" % new_lv.logical_id[1])
        return

      dev.children = new_lvs
      cfg.Update(instance)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    logger.Info("Done changing drbd configs, waiting for sync")
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, instance.primary_node)
      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    for name, (dev, old_lvs, new_lvs) in iv_names.iteritems():
      logger.Info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, tgt_node)
        if not rpc.call_blockdev_remove(tgt_node, lv):
          logger.Error("Can't cleanup child device, skipping. You need to"
                       " fix manually!")
          continue

  def _ExecD8Secondary(self, feedback_fn):
    """Replace the secondary node for drbd8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    instance = self.instance
    iv_names = {}
    vgname = self.cfg.GetVGName()
    # start of work
    cfg = self.cfg
    old_node = self.tgt_node
    new_node = self.new_node
    pri_node = instance.primary_node
    for dev in instance.disks:
      size = dev.size
      logger.Info("adding new local storage on %s for %s" %
                  (new_node, dev.iv_name))
      # since we *always* want to create this LV, we use the
      # _Create...OnPrimary (which forces the creation), even if we
      # are talking about the secondary node
      for new_lv in dev.children:
        if not _CreateBlockDevOnPrimary(cfg, new_node, instance, new_lv,
                                        _GetInstanceInfoText(instance)):
          raise errors.OpExecError("Failed to create new LV named '%s' on"
                                   " node '%s'" %
                                   (new_lv.logical_id[1], new_node))

      # create new devices on new_node
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=(pri_node, new_node,
                                          dev.logical_id[2]),
                              children=dev.children)
      if not _CreateBlockDevOnSecondary(cfg, new_node, instance,
                                        new_drbd, False,
                                      _GetInstanceInfoText(instance)):
        raise errors.OpExecError("Failed to create new DRBD on"
                                 " node '%s'" % new_node)

      # we have new devices, shutdown the drbd on the old secondary
      cfg.SetDiskID(dev, old_node)
      if not rpc.call_blockdev_shutdown(old_node, dev):
        raise errors.OpExecError("Failed to shutdown DRBD on old node")

      # we have new storage, we 'rename' the network on the primary
      cfg.SetDiskID(dev, pri_node)
      # rename to the ip of the new node
      new_uid = list(dev.physical_id)
      new_uid[2] = self.remote_node_info.secondary_ip
      rlist = [(dev, tuple(new_uid))]
      if not rpc.call_blockdev_rename(pri_node, rlist):
        raise errors.OpExecError("Can't detach re-attach drbd %s on node"
                                 " %s from %s to %s" %
                                 (dev.iv_name, pri_node, old_node, new_node))
      dev.logical_id = (pri_node, new_node, dev.logical_id[2])
      cfg.SetDiskID(dev, pri_node)
      cfg.Update(instance)

      iv_names[dev.iv_name] = (dev, dev.children)

    # this can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its
    # return value
    logger.Info("Done changing drbd configs, waiting for sync")
    _WaitForSync(cfg, instance, unlock=True)

    # so check manually all the devices
    for name, (dev, old_lvs) in iv_names.iteritems():
      cfg.SetDiskID(dev, pri_node)
      is_degr = rpc.call_blockdev_find(pri_node, dev)[5]
      if is_degr:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)

    # sync is done: the LVs left on the old secondary can be deleted
    for name, (dev, old_lvs) in iv_names.iteritems():
      logger.Info("remove logical volumes for %s" % name)
      for lv in old_lvs:
        cfg.SetDiskID(lv, old_node)
        if not rpc.call_blockdev_remove(old_node, lv):
          logger.Error("Can't cleanup child device, skipping. You need to"
                       " fix manually!")
          continue

  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    instance = self.instance
    if instance.disk_template == constants.DT_REMOTE_RAID1:
      fn = self._ExecRR1
    elif instance.disk_template == constants.DT_DRBD8:
      if self.op.remote_node is None:
        fn = self._ExecD8DiskOnly
      else:
        fn = self._ExecD8Secondary
    else:
      raise errors.ProgrammerError("Unhandled disk replacement case")
    return fn(feedback_fn)
3756 a9e0c397 Iustin Pop
3757 a8083063 Iustin Pop
3758 a8083063 Iustin Pop
class LUQueryInstanceData(NoHooksLU):
  """Query runtime instance data.

  """
  _OP_REQP = ["instances"]

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if not isinstance(self.op.instances, list):
      raise errors.OpPrereqError("Invalid argument type 'instances'")
    if self.op.instances:
      self.wanted_instances = []
      names = self.op.instances
      for name in names:
        instance = self.cfg.GetInstanceInfo(self.cfg.ExpandInstanceName(name))
        if instance is None:
          raise errors.OpPrereqError("No such instance name '%s'" % name)
        # the append must happen inside the loop; previously it was
        # (wrongly) outside it, so only the last requested instance
        # was ever queried
        self.wanted_instances.append(instance)
    else:
      # no names given: query all instances in the cluster
      self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
                               in self.cfg.GetInstanceList()]
    return


  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute block device status.

    Returns a dict describing the device and, recursively, its
    children; the "pstatus"/"sstatus" entries hold the raw results of
    rpc.call_blockdev_find on the primary/secondary node.

    """
    self.cfg.SetDiskID(dev, instance.primary_node)
    dev_pstatus = rpc.call_blockdev_find(instance.primary_node, dev)
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    if snode:
      self.cfg.SetDiskID(dev, snode)
      dev_sstatus = rpc.call_blockdev_find(snode, dev)
    else:
      dev_sstatus = None

    if dev.children:
      dev_children = [self._ComputeDiskStatus(instance, snode, child)
                      for child in dev.children]
    else:
      dev_children = []

    data = {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      }

    return data

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}
    for instance in self.wanted_instances:
      # the actual (hypervisor-level) state of the instance
      remote_info = rpc.call_instance_info(instance.primary_node,
                                                instance.name)
      if remote_info and "state" in remote_info:
        remote_state = "up"
      else:
        remote_state = "down"
      # the state recorded in the configuration
      if instance.status == "down":
        config_state = "down"
      else:
        config_state = "up"

      disks = [self._ComputeDiskStatus(instance, None, device)
               for device in instance.disks]

      idict = {
        "name": instance.name,
        "config_state": config_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "snodes": instance.secondary_nodes,
        "os": instance.os,
        "memory": instance.memory,
        "nics": [(nic.mac, nic.ip, nic.bridge) for nic in instance.nics],
        "disks": disks,
        "vcpus": instance.vcpus,
        }

      result[instance.name] = idict

    return result
3857 a8083063 Iustin Pop
3858 a8083063 Iustin Pop
3859 a8083063 Iustin Pop
class LUSetInstanceParms(LogicalUnit):
  """Modifies an instance's parameters.

  Supported parameters are memory size, number of vcpus, and the IP
  and bridge of the first NIC; all changes take effect only at the
  next restart of the instance.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  # only the instance name is mandatory; mem/vcpus/ip/bridge are optional
  _OP_REQP = ["instance_name"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    """
    # pass to the hooks only the parameters which are actually changed
    args = dict()
    if self.mem:
      args['memory'] = self.mem
    if self.vcpus:
      args['vcpus'] = self.vcpus
    if self.do_ip or self.do_bridge:
      # the hooks expect a full (ip, bridge) NIC description, so fill
      # in the unchanged half from the current configuration
      if self.do_ip:
        ip = self.ip
      else:
        ip = self.instance.nics[0].ip
      if self.bridge:
        bridge = self.bridge
      else:
        bridge = self.instance.nics[0].bridge
      args['nics'] = [(ip, bridge)]
    env = _BuildInstanceHookEnvByObject(self.instance, override=args)
    nl = [self.sstore.GetMasterNode(),
          self.instance.primary_node] + list(self.instance.secondary_nodes)
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    self.mem = getattr(self.op, "mem", None)
    self.vcpus = getattr(self.op, "vcpus", None)
    self.ip = getattr(self.op, "ip", None)
    self.bridge = getattr(self.op, "bridge", None)
    # at least one parameter must be given, otherwise there is nothing to do
    if [self.mem, self.vcpus, self.ip, self.bridge].count(None) == 4:
      raise errors.OpPrereqError("No changes submitted")
    if self.mem is not None:
      try:
        self.mem = int(self.mem)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid memory size: %s" % str(err))
    if self.vcpus is not None:
      try:
        self.vcpus = int(self.vcpus)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid vcpus number: %s" % str(err))
    if self.ip is not None:
      self.do_ip = True
      # the special value "none" removes the IP from the NIC
      if self.ip.lower() == "none":
        self.ip = None
      else:
        if not utils.IsValidIP(self.ip):
          raise errors.OpPrereqError("Invalid IP address '%s'." % self.ip)
    else:
      self.do_ip = False
    self.do_bridge = (self.bridge is not None)

    instance = self.cfg.GetInstanceInfo(
      self.cfg.ExpandInstanceName(self.op.instance_name))
    if instance is None:
      raise errors.OpPrereqError("No such instance name '%s'" %
                                 self.op.instance_name)
    # store the canonical (expanded) name back into the opcode
    self.op.instance_name = instance.name
    self.instance = instance
    return

  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    Returns a list of (parameter, new value) pairs describing the
    changes which were applied.

    """
    result = []
    instance = self.instance
    if self.mem:
      instance.memory = self.mem
      result.append(("mem", self.mem))
    if self.vcpus:
      instance.vcpus = self.vcpus
      result.append(("vcpus",  self.vcpus))
    if self.do_ip:
      instance.nics[0].ip = self.ip
      result.append(("ip", self.ip))
    if self.bridge:
      instance.nics[0].bridge = self.bridge
      result.append(("bridge", self.bridge))

    # save the modified instance back to the configuration
    self.cfg.AddInstance(instance)

    return result
3958 a8083063 Iustin Pop
3959 a8083063 Iustin Pop
3960 a8083063 Iustin Pop
class LUQueryExports(NoHooksLU):
  """Query the exports list

  """
  _OP_REQP = []

  def CheckPrereq(self):
    """Check that the nodelist contains only existing nodes.

    """
    # the "nodes" attribute is optional on the opcode
    requested = getattr(self.op, "nodes", None)
    self.nodes = _GetWantedNodes(self, requested)

  def Exec(self, feedback_fn):
    """Compute the list of all the exported system images.

    Returns:
      a dictionary with the structure node->(export-list)
      where export-list is a list of the instances exported on
      that node.

    """
    return rpc.call_export_list(self.nodes)
3982 a8083063 Iustin Pop
3983 a8083063 Iustin Pop
3984 a8083063 Iustin Pop
class LUExportInstance(LogicalUnit):
  """Export an instance to an image in the cluster.

  """
  HPATH = "instance-export"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node", "shutdown"]

  def BuildHooksEnv(self):
    """Build hooks env.

    This will run on the master, primary node and target node.

    """
    env = {
      "EXPORT_NODE": self.op.target_node,
      "EXPORT_DO_SHUTDOWN": self.op.shutdown,
      }
    env.update(_BuildInstanceHookEnvByObject(self.instance))
    nl = [self.sstore.GetMasterNode(), self.instance.primary_node,
          self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance name is a valid one.

    """
    instance_name = self.cfg.ExpandInstanceName(self.op.instance_name)
    self.instance = self.cfg.GetInstanceInfo(instance_name)
    if self.instance is None:
      raise errors.OpPrereqError("Instance '%s' not found" %
                                 self.op.instance_name)

    # node verification
    dst_node_short = self.cfg.ExpandNodeName(self.op.target_node)
    self.dst_node = self.cfg.GetNodeInfo(dst_node_short)

    if self.dst_node is None:
      raise errors.OpPrereqError("Destination node '%s' is unknown." %
                                 self.op.target_node)
    # store the canonical (expanded) node name back into the opcode
    self.op.target_node = self.dst_node.name

  def Exec(self, feedback_fn):
    """Export an instance to an image in the cluster.

    The sequence is: optionally shut the instance down, snapshot its
    'sda' disk, restart it, then copy the snapshot to the target node
    and finalize the export there, removing any older export of the
    same instance from the other nodes.

    """
    instance = self.instance
    dst_node = self.dst_node
    src_node = instance.primary_node
    # shutdown the instance, unless requested not to do so
    if self.op.shutdown:
      op = opcodes.OpShutdownInstance(instance_name=instance.name)
      self.processor.ChainOpCode(op)

    vgname = self.cfg.GetVGName()

    # the snapshot devices created so far; filled inside the try block
    # so that the finally clause can restart the instance even if
    # snapshotting fails halfway
    snap_disks = []

    try:
      # note: only the "sda" disk is snapshotted/exported here
      for disk in instance.disks:
        if disk.iv_name == "sda":
          # new_dev_name will be a snapshot of an lvm leaf of the one we passed
          new_dev_name = rpc.call_blockdev_snapshot(src_node, disk)

          if not new_dev_name:
            # best-effort: log and continue without this snapshot
            logger.Error("could not snapshot block device %s on node %s" %
                         (disk.logical_id[1], src_node))
          else:
            new_dev = objects.Disk(dev_type=constants.LD_LV, size=disk.size,
                                      logical_id=(vgname, new_dev_name),
                                      physical_id=(vgname, new_dev_name),
                                      iv_name=disk.iv_name)
            snap_disks.append(new_dev)

    finally:
      # restart the instance if we shut it down above, whatever the
      # outcome of the snapshot phase
      if self.op.shutdown:
        op = opcodes.OpStartupInstance(instance_name=instance.name,
                                       force=False)
        self.processor.ChainOpCode(op)

    # TODO: check for size

    for dev in snap_disks:
      if not rpc.call_snapshot_export(src_node, dev, dst_node.name,
                                           instance):
        logger.Error("could not export block device %s from node"
                     " %s to node %s" %
                     (dev.logical_id[1], src_node, dst_node.name))
      # the snapshot is removed from the source node in any case
      if not rpc.call_blockdev_remove(src_node, dev):
        logger.Error("could not remove snapshot block device %s from"
                     " node %s" % (dev.logical_id[1], src_node))

    if not rpc.call_finalize_export(dst_node.name, instance, snap_disks):
      logger.Error("could not finalize export for instance %s on node %s" %
                   (instance.name, dst_node.name))

    nodelist = self.cfg.GetNodeList()
    nodelist.remove(dst_node.name)

    # on one-node clusters nodelist will be empty after the removal
    # if we proceed the backup would be removed because OpQueryExports
    # substitutes an empty list with the full cluster node list.
    if nodelist:
      op = opcodes.OpQueryExports(nodes=nodelist)
      exportlist = self.processor.ChainOpCode(op)
      for node in exportlist:
        if instance.name in exportlist[node]:
          if not rpc.call_export_remove(node, instance.name):
            logger.Error("could not remove older export for instance %s"
                         " on node %s" % (instance.name, node))
4096 5c947f38 Iustin Pop
4097 5c947f38 Iustin Pop
4098 5c947f38 Iustin Pop
class TagsLU(NoHooksLU):
  """Generic tags LU.

  This is an abstract class which is the parent of all the other tags LUs.

  """
  def CheckPrereq(self):
    """Check prerequisites.

    Resolves self.op.name (for node/instance tags) and stores the
    object to operate on in self.target.

    """
    kind = self.op.kind
    if kind == constants.TAG_CLUSTER:
      self.target = self.cfg.GetClusterInfo()
    elif kind == constants.TAG_NODE:
      expanded = self.cfg.ExpandNodeName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid node name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetNodeInfo(expanded)
    elif kind == constants.TAG_INSTANCE:
      expanded = self.cfg.ExpandInstanceName(self.op.name)
      if expanded is None:
        raise errors.OpPrereqError("Invalid instance name (%s)" %
                                   (self.op.name,))
      self.op.name = expanded
      self.target = self.cfg.GetInstanceInfo(expanded)
    else:
      raise errors.OpPrereqError("Wrong tag type requested (%s)" %
                                 str(self.op.kind))
4127 5c947f38 Iustin Pop
4128 5c947f38 Iustin Pop
4129 5c947f38 Iustin Pop
class LUGetTags(TagsLU):
  """Returns the tags of a given object.

  """
  _OP_REQP = ["kind", "name"]

  def Exec(self, feedback_fn):
    """Return the tags of the target object.

    The target itself was already looked up by TagsLU.CheckPrereq.

    """
    return self.target.GetTags()
4140 5c947f38 Iustin Pop
4141 5c947f38 Iustin Pop
4142 73415719 Iustin Pop
class LUSearchTags(NoHooksLU):
4143 73415719 Iustin Pop
  """Searches the tags for a given pattern.
4144 73415719 Iustin Pop

4145 73415719 Iustin Pop
  """
4146 73415719 Iustin Pop
  _OP_REQP = ["pattern"]
4147 73415719 Iustin Pop
4148 73415719 Iustin Pop
  def CheckPrereq(self):
4149 73415719 Iustin Pop
    """Check prerequisites.
4150 73415719 Iustin Pop

4151 73415719 Iustin Pop
    This checks the pattern passed for validity by compiling it.
4152 73415719 Iustin Pop

4153 73415719 Iustin Pop
    """
4154 73415719 Iustin Pop
    try:
4155 73415719 Iustin Pop
      self.re = re.compile(self.op.pattern)
4156 73415719 Iustin Pop
    except re.error, err:
4157 73415719 Iustin Pop
      raise errors.OpPrereqError("Invalid search pattern '%s': %s" %
4158 73415719 Iustin Pop
                                 (self.op.pattern, err))
4159 73415719 Iustin Pop
4160 73415719 Iustin Pop
  def Exec(self, feedback_fn):
4161 73415719 Iustin Pop
    """Returns the tag list.
4162 73415719 Iustin Pop

4163 73415719 Iustin Pop
    """
4164 73415719 Iustin Pop
    cfg = self.cfg
4165 73415719 Iustin Pop
    tgts = [("/cluster", cfg.GetClusterInfo())]
4166 73415719 Iustin Pop
    ilist = [cfg.GetInstanceInfo(name) for name in cfg.GetInstanceList()]
4167 73415719 Iustin Pop
    tgts.extend([("/instances/%s" % i.name, i) for i in ilist])
4168 73415719 Iustin Pop
    nlist = [cfg.GetNodeInfo(name) for name in cfg.GetNodeList()]
4169 73415719 Iustin Pop
    tgts.extend([("/nodes/%s" % n.name, n) for n in nlist])
4170 73415719 Iustin Pop
    results = []
4171 73415719 Iustin Pop
    for path, target in tgts:
4172 73415719 Iustin Pop
      for tag in target.GetTags():
4173 73415719 Iustin Pop
        if self.re.search(tag):
4174 73415719 Iustin Pop
          results.append((path, tag))
4175 73415719 Iustin Pop
    return results
4176 73415719 Iustin Pop
4177 73415719 Iustin Pop
4178 f27302fa Iustin Pop
class LUAddTags(TagsLU):
4179 5c947f38 Iustin Pop
  """Sets a tag on a given object.
4180 5c947f38 Iustin Pop

4181 5c947f38 Iustin Pop
  """
4182 f27302fa Iustin Pop
  _OP_REQP = ["kind", "name", "tags"]
4183 5c947f38 Iustin Pop
4184 5c947f38 Iustin Pop
  def CheckPrereq(self):
4185 5c947f38 Iustin Pop
    """Check prerequisites.
4186 5c947f38 Iustin Pop

4187 5c947f38 Iustin Pop
    This checks the type and length of the tag name and value.
4188 5c947f38 Iustin Pop

4189 5c947f38 Iustin Pop
    """
4190 5c947f38 Iustin Pop
    TagsLU.CheckPrereq(self)
4191 f27302fa Iustin Pop
    for tag in self.op.tags:
4192 f27302fa Iustin Pop
      objects.TaggableObject.ValidateTag(tag)
4193 5c947f38 Iustin Pop
4194 5c947f38 Iustin Pop
  def Exec(self, feedback_fn):
4195 5c947f38 Iustin Pop
    """Sets the tag.
4196 5c947f38 Iustin Pop

4197 5c947f38 Iustin Pop
    """
4198 5c947f38 Iustin Pop
    try:
4199 f27302fa Iustin Pop
      for tag in self.op.tags:
4200 f27302fa Iustin Pop
        self.target.AddTag(tag)
4201 5c947f38 Iustin Pop
    except errors.TagError, err:
4202 3ecf6786 Iustin Pop
      raise errors.OpExecError("Error while setting tag: %s" % str(err))
4203 5c947f38 Iustin Pop
    try:
4204 5c947f38 Iustin Pop
      self.cfg.Update(self.target)
4205 5c947f38 Iustin Pop
    except errors.ConfigurationError:
4206 3ecf6786 Iustin Pop
      raise errors.OpRetryError("There has been a modification to the"
4207 3ecf6786 Iustin Pop
                                " config file and the operation has been"
4208 3ecf6786 Iustin Pop
                                " aborted. Please retry.")
4209 5c947f38 Iustin Pop
4210 5c947f38 Iustin Pop
4211 f27302fa Iustin Pop
class LUDelTags(TagsLU):
  """Delete a list of tags from a given object.

  """
  _OP_REQP = ["kind", "name", "tags"]

  def CheckPrereq(self):
    """Check prerequisites.

    Beside validating each tag, this verifies that all the tags to be
    removed are actually present on the target object.

    """
    TagsLU.CheckPrereq(self)
    for victim in self.op.tags:
      objects.TaggableObject.ValidateTag(victim)
    requested = frozenset(self.op.tags)
    present = self.target.GetTags()
    missing = requested - present
    if missing:
      quoted = sorted(["'%s'" % victim for victim in missing])
      raise errors.OpPrereqError("Tag(s) %s not found" %
                                 (",".join(quoted)))

  def Exec(self, feedback_fn):
    """Remove the tags from the target object and save the config.

    """
    for victim in self.op.tags:
      self.target.RemoveTag(victim)
    # persist the change; a concurrent config modification aborts the
    # operation and asks the caller to retry
    try:
      self.cfg.Update(self.target)
    except errors.ConfigurationError:
      raise errors.OpRetryError("There has been a modification to the"
                                " config file and the operation has been"
                                " aborted. Please retry.")