#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with storage of instances."""

import itertools
import logging
import os
import time

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
from ganeti import opcodes
from ganeti import rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeName, \
  CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
  CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy

import ganeti.masterd.instance


_DISK_TEMPLATE_NAME_PREFIX = {
  constants.DT_PLAIN: "",
  constants.DT_RBD: ".rbd",
  constants.DT_EXT: ".ext",
  }


_DISK_TEMPLATE_DEVICE_TYPE = {
  constants.DT_PLAIN: constants.LD_LV,
  constants.DT_FILE: constants.LD_FILE,
  constants.DT_SHARED_FILE: constants.LD_FILE,
  constants.DT_BLOCK: constants.LD_BLOCKDEV,
  constants.DT_RBD: constants.LD_RBD,
  constants.DT_EXT: constants.LD_EXT,
  }


def CreateSingleBlockDev(lu, node, instance, device, info, force_open,
                         excl_stor):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  """
  lu.cfg.SetDiskID(device, node)
  result = lu.rpc.call_blockdev_create(node, device, device.size,
                                       instance.name, force_open, info,
                                       excl_stor)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device, node, instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload
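
# Hedged usage sketch (illustrative, not part of the original module): a call
# site that already holds the relevant node lock would create a leaf device
# roughly as follows, reusing helpers defined in this file:
#   info = GetInstanceInfoText(instance)
#   excl_stor = IsExclusiveStorageEnabledNodeName(lu.cfg, node)
#   CreateSingleBlockDev(lu, node, instance, disk, info, True, excl_stor)
# Children of `disk` must already exist on `node`; only the device itself is
# created here.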


def _CreateBlockDevInner(lu, node, instance, device, force_create,
                         info, force_open, excl_stor):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @attention: The device has to be annotated already.

  @param lu: the lu on whose behalf we execute
  @param node: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device for which
      CreateOnSecondary() is true
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  @return: list of created devices
  """
  created_devices = []
  try:
    if device.CreateOnSecondary():
      force_create = True

    if device.children:
      for child in device.children:
        devs = _CreateBlockDevInner(lu, node, instance, child, force_create,
                                    info, force_open, excl_stor)
        created_devices.extend(devs)

    if not force_create:
      return created_devices

    CreateSingleBlockDev(lu, node, instance, device, info, force_open,
                         excl_stor)
    # The device has been completely created, so there is no point in keeping
    # its subdevices in the list. We just add the device itself instead.
    created_devices = [(node, device)]
    return created_devices

  except errors.DeviceCreationError, e:
    e.created_devices.extend(created_devices)
    raise e
  except errors.OpExecError, e:
    raise errors.DeviceCreationError(str(e), created_devices)
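
# On failure the caller receives an errors.DeviceCreationError whose
# `created_devices` attribute lists every (node, device) pair that was
# actually created, which makes rollback possible. Illustrative sketch of the
# pattern (mirroring what CreateDisks below does):
#   try:
#     created = _CreateBlockDevInner(lu, node, instance, disk, False, info,
#                                    True, excl_stor)
#   except errors.DeviceCreationError, err:
#     _UndoCreateDisks(lu, err.created_devices)
#     raise errors.OpExecError(err.message)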


def IsExclusiveStorageEnabledNodeName(cfg, nodename):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type nodename: string
  @param nodename: The node
  @rtype: bool
  @return: The effective value of exclusive_storage
  @raise errors.OpPrereqError: if no node exists with the given name

  """
  ni = cfg.GetNodeInfo(nodename)
  if ni is None:
    raise errors.OpPrereqError("Invalid node name %s" % nodename,
                               errors.ECODE_NOENT)
  return IsExclusiveStorageEnabledNode(cfg, ni)


def _CreateBlockDev(lu, node, instance, device, force_create, info,
                    force_open):
  """Wrapper around L{_CreateBlockDevInner}.

  This method annotates the root device first.

  """
  (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
  excl_stor = IsExclusiveStorageEnabledNodeName(lu.cfg, node)
  return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
                              force_open, excl_stor)


def _UndoCreateDisks(lu, disks_created):
  """Undo the work performed by L{CreateDisks}.

  This function is called in case of an error to undo the work of
  L{CreateDisks}.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param disks_created: the result returned by L{CreateDisks}

  """
  for (node, disk) in disks_created:
    lu.cfg.SetDiskID(disk, node)
    result = lu.rpc.call_blockdev_remove(node, disk)
    if result.fail_msg:
      logging.warning("Failed to remove newly-created disk %s on node %s:"
                      " %s", disk, node, result.fail_msg)


def CreateDisks(lu, instance, to_skip=None, target_node=None, disks=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node: string
  @param target_node: if passed, overrides the target node for creation
  @type disks: list of L{objects.Disk}
  @param disks: the disks to create; if not specified, all the disks of the
      instance are created
  @return: information about the created disks, to be used to call
      L{_UndoCreateDisks}
  @raise errors.OpPrereqError: in case of error

  """
  info = GetInstanceInfoText(instance)
  if target_node is None:
    pnode = instance.primary_node
    all_nodes = instance.all_nodes
  else:
    pnode = target_node
    all_nodes = [pnode]

  if disks is None:
    disks = instance.disks

  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir, pnode))

  disks_created = []
  for idx, device in enumerate(disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
    for node in all_nodes:
      f_create = node == pnode
      try:
        _CreateBlockDev(lu, node, instance, device, f_create, info, f_create)
        disks_created.append((node, device))
      except errors.DeviceCreationError, e:
        logging.warning("Creating disk %s for instance '%s' failed",
                        idx, instance.name)
        disks_created.extend(e.created_devices)
        _UndoCreateDisks(lu, disks_created)
        raise errors.OpExecError(e.message)
  return disks_created
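
# The return value is shaped for rollback: a caller that must clean up after a
# later failure can feed it straight back into _UndoCreateDisks. Hedged sketch
# (hypothetical call site):
#   disks_created = CreateDisks(lu, instance)
#   try:
#     ...  # further setup that may fail
#   except errors.OpExecError:
#     _UndoCreateDisks(lu, disks_created)
#     raise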


def ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group.

  """
  def _compute(disks, payload):
    """Universal algorithm.

    """
    vgs = {}
    for disk in disks:
      vgs[disk[constants.IDISK_VG]] = \
        vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + \
        payload

    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
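
# Worked example (illustrative only): for a DRBD8 instance with two disks of
# 1024 and 2048 MiB, both in volume group "xenvg", the requirement includes
# the 128 MiB of DRBD metadata per disk:
#   ComputeDiskSizePerVG(constants.DT_DRBD8,
#                        [{constants.IDISK_VG: "xenvg",
#                          constants.IDISK_SIZE: 1024},
#                         {constants.IDISK_VG: "xenvg",
#                          constants.IDISK_SIZE: 2048}])
#   => {"xenvg": 1024 + 2048 + 2 * constants.DRBD_META_SIZE}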


def ComputeDisks(op, default_vg):
  """Computes the instance disks.

  @param op: The instance opcode
  @param default_vg: The default_vg to assume

  @return: The computed disks

  """
  disks = []
  for disk in op.disks:
    mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
    if mode not in constants.DISK_ACCESS_SET:
      raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                 mode, errors.ECODE_INVAL)
    size = disk.get(constants.IDISK_SIZE, None)
    if size is None:
      raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
    try:
      size = int(size)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                 errors.ECODE_INVAL)

    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
    if ext_provider and op.disk_template != constants.DT_EXT:
      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
                                 " disk template, not %s" %
                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
                                  op.disk_template), errors.ECODE_INVAL)

    data_vg = disk.get(constants.IDISK_VG, default_vg)
    name = disk.get(constants.IDISK_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    new_disk = {
      constants.IDISK_SIZE: size,
      constants.IDISK_MODE: mode,
      constants.IDISK_VG: data_vg,
      constants.IDISK_NAME: name,
      }

    for key in [
      constants.IDISK_METAVG,
      constants.IDISK_ADOPT,
      constants.IDISK_SPINDLES,
      ]:
      if key in disk:
        new_disk[key] = disk[key]

    # For extstorage, demand the `provider' option and add any
    # additional parameters (ext-params) to the dict
    if op.disk_template == constants.DT_EXT:
      if ext_provider:
        new_disk[constants.IDISK_PROVIDER] = ext_provider
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            new_disk[key] = disk[key]
      else:
        raise errors.OpPrereqError("Missing provider for template '%s'" %
                                   constants.DT_EXT, errors.ECODE_INVAL)

    disks.append(new_disk)

  return disks
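
# Illustrative normalization (assuming the opcode's `disks` entries came from
# the command line): with default_vg "xenvg", the entry {"size": "10240"}
# becomes
#   {constants.IDISK_SIZE: 10240, constants.IDISK_MODE: constants.DISK_RDWR,
#    constants.IDISK_VG: "xenvg", constants.IDISK_NAME: None}
# while a missing size or an unknown access mode raises errors.OpPrereqError.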


def CheckRADOSFreeSpace():
  """Compute disk size requirements inside the RADOS cluster.

  """
  # For the RADOS cluster we assume there is always enough space.
  pass


def _GenerateDRBD8Branch(lu, primary, secondary, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())

  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgnames[0], names[0]),
                          params={})
  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  dev_meta = objects.Disk(dev_type=constants.LD_LV,
                          size=constants.DRBD_META_SIZE,
                          logical_id=(vgnames[1], names[1]),
                          params={})
  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary, secondary, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name, params={})
  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  return drbd_dev
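
# The resulting device tree, sketched (data and metadata LVs as children of
# the DRBD8 device):
#   drbd_dev (LD_DRBD8, logical_id=(primary, secondary, port,
#                                   p_minor, s_minor, shared_secret))
#   |- dev_data (LD_LV, `size` MiB, on vgnames[0], named names[0])
#   `- dev_meta (LD_LV, DRBD_META_SIZE MiB, on vgnames[1], named names[1])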


def GenerateDiskTemplate(
  lu, template_name, instance_name, primary_node, secondary_nodes,
  disk_info, file_storage_dir, file_driver, base_index,
  feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
  _req_shr_file_storage=opcodes.RequireSharedFileStorage):
  """Generate the entire disk layout for a given template type.

  """
  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_nodes) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node = secondary_nodes[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node, remote_node] * len(disk_info), instance_name)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node, remote_node,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disks.append(disk_dev)
  else:
    if secondary_nodes:
      raise errors.ProgrammerError("Wrong template configuration")

    if template_name == constants.DT_FILE:
      _req_file_storage()
    elif template_name == constants.DT_SHARED_FILE:
      _req_shr_file_storage()

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    if template_name == constants.DT_PLAIN:

      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])

    elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/disk%d" % (file_storage_dir,
                                                    disk_index))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    elif template_name == constants.DT_EXT:
      def logical_id_fn(idx, _, disk):
        provider = disk.get(constants.IDISK_PROVIDER, None)
        if provider is None:
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
                                       " not found" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER))
        return (provider, names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]

    for idx, disk in enumerate(disk_info):
      params = {}
      # Only for the Ext template add disk_info to params
      if template_name == constants.DT_EXT:
        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            params[key] = disk[key]
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
                              logical_id=logical_id_fn(idx, disk_index, disk),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE],
                              params=params)
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
      disks.append(disk_dev)

  return disks
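
# Illustrative naming (assuming _GenerateUniqueNames, defined elsewhere in
# this module, prefixes a cluster-unique ID): two RBD disks with base_index 0
# get names such as "<unique-id>.rbd.disk0" and "<unique-id>.rbd.disk1", each
# with logical_id ("rbd", name) and iv_name "disk/0" and "disk/1".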


def CheckSpindlesExclusiveStorage(diskdict, es_flag):
  """Check the presence of the spindle options with exclusive_storage.

  @type diskdict: dict
  @param diskdict: disk parameters
  @type es_flag: bool
  @param es_flag: the effective value of the exclusive_storage flag
  @raise errors.OpPrereqError: when spindles are given and they should not be

  """
  if (not es_flag and constants.IDISK_SPINDLES in diskdict and
      diskdict[constants.IDISK_SPINDLES] is not None):
    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
                               " when exclusive storage is not active",
                               errors.ECODE_INVAL)
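
# Example (illustrative): with exclusive storage disabled, spindles are
# rejected; with it enabled, the same disk dict passes:
#   CheckSpindlesExclusiveStorage({constants.IDISK_SPINDLES: 2}, False)
#     -> raises errors.OpPrereqError
#   CheckSpindlesExclusiveStorage({constants.IDISK_SPINDLES: 2}, True)
#     -> returns None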


class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  _MODIFYABLE = compat.UniqueFrozenset([
    constants.IDISK_SIZE,
    constants.IDISK_MODE,
    constants.IDISK_SPINDLES,
    ])

  # New or changed disk parameters may have different semantics
  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
    constants.IDISK_ADOPT,

    # TODO: Implement support for changing VG while recreating
    constants.IDISK_VG,
    constants.IDISK_METAVG,
    constants.IDISK_PROVIDER,
    constants.IDISK_NAME,
    ]))

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    be_full = self.cfg.GetClusterInfo().FillBE(self.instance)

    # FIXME
    # The allocator should actually run in "relocate" mode, but current
    # allocators don't support relocating all the nodes of an instance at
    # the same time. As a workaround we use "allocate" mode, but this is
    # suboptimal for two reasons:
    # - The instance name passed to the allocator is present in the list of
    #   existing instances, so there could be a conflict within the
    #   internal structures of the allocator. This doesn't happen with the
    #   current allocators, but it's a liability.
    # - The allocator counts the resources used by the instance twice: once
    #   because the instance exists already, and once because it tries to
    #   allocate a new instance.
    # The allocator could choose some of the nodes on which the instance is
    # running, but that's not a problem. If the instance nodes are broken,
    # they should already be marked as drained or offline, and hence
    # skipped by the allocator. If instance disks have been lost for other
    # reasons, then recreating the disks on the same nodes should be fine.
    disk_template = self.instance.disk_template
    spindle_use = be_full[constants.BE_SPINDLE_USE]
    req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                        disk_template=disk_template,
                                        tags=list(self.instance.GetTags()),
                                        os=self.instance.os,
                                        nics=[{}],
                                        vcpus=be_full[constants.BE_VCPUS],
                                        memory=be_full[constants.BE_MAXMEM],
                                        spindle_use=spindle_use,
                                        disks=[{constants.IDISK_SIZE: d.size,
                                                constants.IDISK_MODE: d.mode}
                                               for d in self.instance.disks],
                                        hypervisor=self.instance.hypervisor,
                                        node_whitelist=None)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    assert req.RequiredNodes() == len(self.instance.all_nodes)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    self.op.nodes = ial.result
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

  def CheckArguments(self):
    if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
      # Normalize and convert deprecated list of disk indices
      self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]

    duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
    if duplicates:
      raise errors.OpPrereqError("Some disks have been specified more than"
                                 " once: %s" % utils.CommaJoin(duplicates),
                                 errors.ECODE_INVAL)

    # We don't want CheckIAllocatorOrNode selecting the default iallocator
    # when neither iallocator nor nodes are specified
    if self.op.iallocator or self.op.nodes:
      CheckIAllocatorOrNode(self, "iallocator", "nodes")

    for (idx, params) in self.op.disks:
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
      unsupported = frozenset(params.keys()) - self._MODIFYABLE
      if unsupported:
        raise errors.OpPrereqError("Parameters for disk %s try to change"
                                   " unmodifiable parameter(s): %s" %
                                   (idx, utils.CommaJoin(unsupported)),
                                   errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    if self.op.nodes:
      self.op.nodes = [ExpandNodeName(self.cfg, n) for n in self.op.nodes]
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      if self.op.iallocator:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE_RES] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.iallocator is not None
      assert not self.op.nodes
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      # Lock the primary group used by the instance optimistically; this
      # requires going via the node before it's locked, requiring
      # verification later on
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name, primary_only=True)

    elif level == locking.LEVEL_NODE:
      # If an allocator is used, then we lock all the nodes in the current
      # instance group, as we don't know yet which ones will be selected;
      # if we replace the nodes without using an allocator, locks are
      # already declared in ExpandNames; otherwise, we need to lock all the
      # instance nodes for disk re-creation
      if self.op.iallocator:
        assert not self.op.nodes
        assert not self.needed_locks[locking.LEVEL_NODE]
        assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1

        # Lock member nodes of the group of the primary node
        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
          self.needed_locks[locking.LEVEL_NODE].extend(
            self.cfg.GetNodeGroup(group_uuid).members)

        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
      elif not self.op.nodes:
        self._LockInstancesNodes(primary_only=False)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.nodes:
      if len(self.op.nodes) != len(instance.all_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(instance.all_nodes),
                                    len(self.op.nodes)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
             len(self.op.nodes) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
             len(self.op.nodes) == 1
      primary_node = self.op.nodes[0]
    else:
      primary_node = instance.primary_node
    if not self.op.iallocator:
      CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)

    # Verify if node group locks are still correct
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      # Node group locks are acquired only for the primary node (and only
      # when the allocator is used)
      CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
                              primary_only=True)

    # if we replace nodes *and* the old primary is offline, we don't
    # check the instance state
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not ((self.op.iallocator or self.op.nodes) and old_pnode.offline):
      CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                         msg="cannot recreate disks")

    if self.op.disks:
      self.disks = dict(self.op.disks)
    else:
      self.disks = dict((idx, {}) for idx in range(len(instance.disks)))

    maxidx = max(self.disks.keys())
    if maxidx >= len(instance.disks):
      raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
                                 errors.ECODE_INVAL)

    if ((self.op.nodes or self.op.iallocator) and
         sorted(self.disks.keys()) != range(len(instance.disks))):
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)

    self.instance = instance

    if self.op.iallocator:
      self._RunAllocator()
      # Release unneeded node and node resource locks
      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    if self.op.nodes:
      nodes = self.op.nodes
    else:
      nodes = instance.all_nodes
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodeNames(self.cfg, nodes).values()
      )
    for new_params in self.disks.values():
      CheckSpindlesExclusiveStorage(new_params, excl_stor)

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    instance = self.instance

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    to_skip = []
    mods = [] # keeps track of needed changes

    for idx, disk in enumerate(instance.disks):
      try:
        changes = self.disks[idx]
      except KeyError:
        # Disk should not be recreated
        to_skip.append(idx)
        continue

      # update secondaries for disks, if needed
      if self.op.nodes and disk.dev_type == constants.LD_DRBD8:
        # need to update the nodes and minors
        assert len(self.op.nodes) == 2
        assert len(disk.logical_id) == 6 # otherwise disk internals
                                         # have changed
        (_, _, old_port, _, _, old_secret) = disk.logical_id
        new_minors = self.cfg.AllocateDRBDMinor(self.op.nodes, instance.name)
        new_id = (self.op.nodes[0], self.op.nodes[1], old_port,
                  new_minors[0], new_minors[1], old_secret)
        assert len(disk.logical_id) == len(new_id)
      else:
        new_id = None

      mods.append((idx, new_id, changes))

    # now that we have passed all asserts above, we can apply the mods
    # in a single run (to avoid partial changes)
    for idx, new_id, changes in mods:
      disk = instance.disks[idx]
      if new_id is not None:
        assert disk.dev_type == constants.LD_DRBD8
        disk.logical_id = new_id
      if changes:
        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
                    mode=changes.get(constants.IDISK_MODE, None))

    # change primary node, if needed, and persist the modified instance
    if self.op.nodes:
      instance.primary_node = self.op.nodes[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")
      self.cfg.Update(instance, feedback_fn)

    # All touched nodes must be locked
    mylocks = self.owned_locks(locking.LEVEL_NODE)
    assert mylocks.issuperset(frozenset(instance.all_nodes))
    new_disks = CreateDisks(self, instance, to_skip=to_skip)

    # TODO: Release node locks before wiping, or explain why it's not possible
    if self.cfg.GetClusterInfo().prealloc_wipe_disks:
      wipedisks = [(idx, disk, 0)
                   for (idx, disk) in enumerate(instance.disks)
                   if idx not in to_skip]
      WipeOrCleanupDisks(self, instance, disks=wipedisks, cleanup=new_disks)
858 763ad5be Thomas Thrainer
859 763ad5be Thomas Thrainer
860 763ad5be Thomas Thrainer
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
861 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in the specified VG.
862 763ad5be Thomas Thrainer

863 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
864 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
865 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
866 763ad5be Thomas Thrainer
  exception.
867 763ad5be Thomas Thrainer

868 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
869 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
870 763ad5be Thomas Thrainer
  @type nodenames: C{list}
871 763ad5be Thomas Thrainer
  @param nodenames: the list of node names to check
872 763ad5be Thomas Thrainer
  @type vg: C{str}
873 763ad5be Thomas Thrainer
  @param vg: the volume group to check
874 763ad5be Thomas Thrainer
  @type requested: C{int}
875 763ad5be Thomas Thrainer
  @param requested: the amount of disk in MiB to check for
876 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
877 763ad5be Thomas Thrainer
      or we cannot check the node
878 763ad5be Thomas Thrainer

879 763ad5be Thomas Thrainer
  """
880 763ad5be Thomas Thrainer
  es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
881 4b92e992 Helga Velroyen
  # FIXME: This maps everything to storage type 'lvm-vg' to maintain
882 4b92e992 Helga Velroyen
  # the current functionality. Refactor to make it more flexible.
883 4b92e992 Helga Velroyen
  nodeinfo = lu.rpc.call_node_info(nodenames, [(constants.ST_LVM_VG, vg)], None,
884 4b92e992 Helga Velroyen
                                   es_flags)
885 763ad5be Thomas Thrainer
  for node in nodenames:
886 763ad5be Thomas Thrainer
    info = nodeinfo[node]
887 763ad5be Thomas Thrainer
    info.Raise("Cannot get current information from node %s" % node,
888 763ad5be Thomas Thrainer
               prereq=True, ecode=errors.ECODE_ENVIRON)
889 763ad5be Thomas Thrainer
    (_, (vg_info, ), _) = info.payload
890 763ad5be Thomas Thrainer
    vg_free = vg_info.get("vg_free", None)
891 763ad5be Thomas Thrainer
    if not isinstance(vg_free, int):
892 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't compute free disk space on node"
893 763ad5be Thomas Thrainer
                                 " %s for vg %s, result was '%s'" %
894 763ad5be Thomas Thrainer
                                 (node, vg, vg_free), errors.ECODE_ENVIRON)
895 763ad5be Thomas Thrainer
    if requested > vg_free:
896 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Not enough disk space on target node %s"
897 763ad5be Thomas Thrainer
                                 " vg %s: required %d MiB, available %d MiB" %
898 763ad5be Thomas Thrainer
                                 (node, vg, requested, vg_free),
899 763ad5be Thomas Thrainer
                                 errors.ECODE_NORES)
900 763ad5be Thomas Thrainer
901 763ad5be Thomas Thrainer
902 5eacbcae Thomas Thrainer
def CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
903 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in all the VGs.
904 763ad5be Thomas Thrainer

905 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
906 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
907 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
908 763ad5be Thomas Thrainer
  exception.
909 763ad5be Thomas Thrainer

910 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
911 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
912 763ad5be Thomas Thrainer
  @type nodenames: C{list}
913 763ad5be Thomas Thrainer
  @param nodenames: the list of node names to check
914 763ad5be Thomas Thrainer
  @type req_sizes: C{dict}
915 763ad5be Thomas Thrainer
  @param req_sizes: the hash of vg and corresponding amount of disk in
916 763ad5be Thomas Thrainer
      MiB to check for
917 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
918 763ad5be Thomas Thrainer
      or we cannot check the node
919 763ad5be Thomas Thrainer

920 763ad5be Thomas Thrainer
  """
921 763ad5be Thomas Thrainer
  for vg, req_size in req_sizes.items():
922 763ad5be Thomas Thrainer
    _CheckNodesFreeDiskOnVG(lu, nodenames, vg, req_size)
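# Illustrative usage sketch (not part of the original module): req_sizes maps
# VG names to the space, in MiB, that every node must have available, e.g.
#   CheckNodesFreeDiskPerVG(lu, ["node1.example.com", "node2.example.com"],
#                           {"xenvg": 10240})
# would require 10 GiB free in VG "xenvg" on both (hypothetical) nodes.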


def _DiskSizeInBytesToMebibytes(lu, size):
  """Converts a disk size in bytes to mebibytes.

  Warns and rounds up if the size isn't an even multiple of 1 MiB.

  """
  (mib, remainder) = divmod(size, 1024 * 1024)

  if remainder != 0:
    lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
                  " to not overwrite existing data (%s bytes will not be"
                  " wiped)", (1024 * 1024) - remainder)
    mib += 1

  return mib
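# Worked example (illustrative, not part of the original module): one byte
# more than 1 GiB is not an even MiB multiple, so divmod gives (1024, 1), a
# warning reports that 1048575 bytes will not be wiped, and the result is
# rounded up:
#   _DiskSizeInBytesToMebibytes(lu, 1024 * 1024 * 1024 + 1)  # -> 1025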


def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time
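# Worked example (illustrative, not part of the original module): writing
# 512 MiB of 2048 MiB in 30 seconds gives avg_time = 30.0 / 512 s/MiB, so the
# remaining 1536 MiB translate into
#   _CalcEta(30.0, 512, 2048)  # -> 90.0 seconds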


def WipeDisks(lu, instance, disks=None):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
  @param disks: Disk details; tuple contains disk index, disk object and the
    start offset

  """
  node = instance.primary_node

  if disks is None:
    disks = [(idx, disk, 0)
             for (idx, disk) in enumerate(instance.disks)]

  for (_, device, _) in disks:
    lu.cfg.SetDiskID(device, node)

  logging.info("Pausing synchronization of disks of instance '%s'",
               instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node,
                                                  (map(compat.snd, disks),
                                                   instance),
                                                  True)
  result.Raise("Failed to pause disk synchronization on node '%s'" % node)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warning("Pausing synchronization of disk %s of instance '%s'"
                      " failed", idx, instance.name)

  try:
    for (idx, device, offset) in disks:
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
      wipe_chunk_size = \
        int(min(constants.MAX_WIPE_CHUNK,
                device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))

      size = device.size
      last_output = 0
      start_time = time.time()

      if offset == 0:
        info_text = ""
      else:
        info_text = (" (from %s to %s)" %
                     (utils.FormatUnit(offset, "h"),
                      utils.FormatUnit(size, "h")))

      lu.LogInfo("* Wiping disk %s%s", idx, info_text)

      logging.info("Wiping disk %d for instance %s on node %s using"
                   " chunk size %s", idx, instance.name, node, wipe_chunk_size)

      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)

        logging.debug("Wiping disk %d, offset %s, chunk %s",
                      idx, offset, wipe_size)

        result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
                                           wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))

        now = time.time()
        offset += wipe_size
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s",
                     offset / float(size) * 100, utils.FormatSeconds(eta))
          last_output = now
  finally:
    logging.info("Resuming synchronization of disks for instance '%s'",
                 instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node,
                                                    (map(compat.snd, disks),
                                                     instance),
                                                    False)

    if result.fail_msg:
      lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
                    node, result.fail_msg)
    else:
      for idx, success in enumerate(result.payload):
        if not success:
          lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
                        " failed", idx, instance.name)


def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
  """Wrapper for L{WipeDisks} that handles errors.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @param disks: see L{WipeDisks}
  @param cleanup: the result returned by L{CreateDisks}, used for cleanup in
      case of error
  @raise errors.OpExecError: in case of failure

  """
  try:
    WipeDisks(lu, instance, disks=disks)
  except errors.OpExecError:
    logging.warning("Wiping disks for instance '%s' failed",
                    instance.name)
    _UndoCreateDisks(lu, cleanup)
    raise


def ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list.

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance")
    return disks
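# Usage sketch (illustrative, not part of the original module):
#   ExpandCheckDisks(instance, None) yields all of instance.disks,
#   ExpandCheckDisks(instance, [instance.disks[0]]) yields just the first
# disk, and passing a disk of another instance raises errors.ProgrammerError.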


def WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disks to sync.

  """
  if not instance.disks or (disks is not None and not disks):
    return True

  disks = ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.LogInfo("Waiting for instance %s to sync disks", instance.name)

  node = instance.primary_node

  for dev in disks:
    lu.cfg.SetDiskID(dev, node)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node, (disks, instance))
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.LogInfo("- device %s: %5.2f%% done, %s",
                   disks[i].iv_name, mstat.sync_percent, rem_time)

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.LogInfo("Instance %s's disks are in sync", instance.name)

  return not cumul_degraded
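# Usage sketch (illustrative, not part of the original module): callers
# usually gate on the boolean result, e.g.
#   "if not WaitForSync(lu, instance): ..." to detect degraded disks; with
# oneshot=True the mirror status is sampled only once (modulo the few
# degraded-state retries above) instead of blocking until the sync finishes.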


def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  Errors on the primary node make the function fail unless
  ignore_primary is true.

  """
  all_result = True
  disks = ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node)
      result = lu.rpc.call_blockdev_shutdown(node, (top_disk, instance))
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, node, msg)
        if ((node == instance.primary_node and not ignore_primary) or
            (node != instance.primary_node and not result.offline)):
          all_result = False
  return all_result
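# Behaviour sketch (illustrative, not part of the original module):
#   ShutdownInstanceDisks(lu, instance, ignore_primary=True)
# tolerates a failing primary node; only failures on the primary (with
# ignore_primary left False) or on an online secondary make the function
# return False, while offline secondaries are merely logged.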


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks if an instance is running, before calling
  ShutdownInstanceDisks.

  """
  CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
  ShutdownInstanceDisks(lu, instance, disks=disks)


def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                          ignore_size=False):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @return: False if the operation failed, otherwise a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  iname = instance.name
  disks = ExpandCheckDisks(instance, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
                                             False, idx)
      msg = result.fail_msg
      if msg:
        is_offline_secondary = (node in instance.secondary_nodes and
                                result.offline)
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=False, pass=1): %s",
                      inst_disk.iv_name, node, msg)
        if not (ignore_secondaries or is_offline_secondary):
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node, node_disk in inst_disk.ComputeNodeTree(instance.primary_node):
      if node != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node)
      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
                                             True, idx)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=True, pass=2): %s",
                      inst_disk.iv_name, node, msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((instance.primary_node, inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  return disks_ok, device_info


def StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
                                      ignore_secondaries=force)
  if not disks_ok:
    ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.LogWarning("",
                    hint=("If the message above refers to a secondary node,"
                          " you can retry the operation using '--force'"))
    raise errors.OpExecError("Disk consistency error")


class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      "ABSOLUTE": self.op.absolute,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    nodenames = list(instance.all_nodes)
    for node in nodenames:
      CheckNodeOnline(self, node)

    self.instance = instance

    if instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = instance.FindDisk(self.op.disk)

    if self.op.absolute:
      self.target = self.op.amount
      self.delta = self.target - self.disk.size
      if self.delta < 0:
        raise errors.OpPrereqError("Requested size (%s) is smaller than "
                                   "current disk size (%s)" %
                                   (utils.FormatUnit(self.target, "h"),
                                    utils.FormatUnit(self.disk.size, "h")),
                                   errors.ECODE_STATE)
    else:
      self.delta = self.op.amount
      self.target = self.disk.size + self.delta
      if self.delta < 0:
        raise errors.OpPrereqError("Requested increment (%s) is negative" %
                                   utils.FormatUnit(self.delta, "h"),
                                   errors.ECODE_INVAL)

    self._CheckDiskSpace(nodenames, self.disk.ComputeGrowth(self.delta))
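  # Worked example (illustrative, not part of the original module): for a
  # 10240 MiB disk, an absolute request of amount=20480 yields delta = 10240
  # and target = 20480; a relative request of amount=10240 produces the same
  # numbers, and a negative delta is rejected in either mode before any node
  # is contacted.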

  def _CheckDiskSpace(self, nodenames, req_vgspace):
    template = self.instance.disk_template
    if template not in constants.DTS_NO_FREE_SPACE_CHECK:
      # TODO: check the free disk space for file, when that feature will be
      # supported
      nodes = map(self.cfg.GetNodeInfo, nodenames)
      es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
                        nodes)
      if es_nodes:
        # With exclusive storage we need to do something smarter than just
        # looking at free space; for now, let's simply abort the operation.
        raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
                                   " is enabled", errors.ECODE_STATE)
      CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    instance = self.instance
    disk = self.disk

    assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks

    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
                (self.op.disk, instance.name,
                 utils.FormatUnit(self.delta, "h"),
                 utils.FormatUnit(self.target, "h")))

    # First run all grow ops in dry-run mode
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
                                           True, True)
      result.Raise("Dry-run grow request failed to node %s" % node)

    if wipe_disks:
      # Get disk size from primary node for wiping
      result = self.rpc.call_blockdev_getsize(instance.primary_node, [disk])
      result.Raise("Failed to retrieve disk size from node '%s'" %
                   instance.primary_node)

      (disk_size_in_bytes, ) = result.payload

      if disk_size_in_bytes is None:
        raise errors.OpExecError("Failed to retrieve disk size from primary"
                                 " node '%s'" % instance.primary_node)

      old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)

      assert old_disk_size >= disk.size, \
        ("Retrieved disk size too small (got %s, should be at least %s)" %
         (old_disk_size, disk.size))
    else:
      old_disk_size = None

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real on the backing storage
    for node in instance.all_nodes:
      self.cfg.SetDiskID(disk, node)
      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
                                           False, True)
      result.Raise("Grow request failed to node %s" % node)

    # And now execute it for logical storage, on the primary node
    node = instance.primary_node
    self.cfg.SetDiskID(disk, node)
    result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
                                         False, False)
    result.Raise("Grow request failed to node %s" % node)

    disk.RecordGrow(self.delta)
    self.cfg.Update(instance, feedback_fn)

    # Changes have been recorded, release node lock
    ReleaseLocks(self, locking.LEVEL_NODE)

    # Downgrade lock while waiting for sync
    self.glm.downgrade(locking.LEVEL_INSTANCE)

    assert wipe_disks ^ (old_disk_size is None)

    if wipe_disks:
      assert instance.disks[self.op.disk] == disk

      # Wipe newly added disk space
      WipeDisks(self, instance,
                disks=[(self.op.disk, disk, old_disk_size)])

    if self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, instance, disks=[disk])
      if disk_abort:
        self.LogWarning("Disk syncing has not returned a good status; check"
                        " the instance")
      if instance.admin_state != constants.ADMINST_UP:
        _SafeShutdownInstanceDisks(self, instance, disks=[disk])
    elif instance.admin_state != constants.ADMINST_UP:
      self.LogWarning("Not shutting down the disk even if the instance is"
                      " not supposed to be running because no wait for"
                      " sync mode was requested")

    assert self.owned_locks(locking.LEVEL_NODE_RES)
    assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)


class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    remote_node = self.op.remote_node
    ialloc = self.op.iallocator
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if remote_node is None and ialloc is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)
      else:
        CheckIAllocatorOrNode(self, "iallocator", "remote_node")

    elif remote_node is not None or ialloc is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    assert locking.LEVEL_NODE not in self.needed_locks
    assert locking.LEVEL_NODE_RES not in self.needed_locks
    assert locking.LEVEL_NODEGROUP not in self.needed_locks

    assert self.op.iallocator is None or self.op.remote_node is None, \
      "Conflicting options"

    if self.op.remote_node is not None:
      self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8Dev.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

      if self.op.iallocator is not None:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE_RES] = []

    self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node,
                                   self.op.disks, self.op.early_release,
                                   self.op.ignore_ipolicy)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.remote_node is None
      assert self.op.iallocator is not None
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      # Lock all groups used by instance optimistically; this requires going
      # via the node before it's locked, requiring verification later on
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)

    elif level == locking.LEVEL_NODE:
      if self.op.iallocator is not None:
        assert self.op.remote_node is None
        assert not self.needed_locks[locking.LEVEL_NODE]
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)

        # Lock member nodes of all locked groups
        self.needed_locks[locking.LEVEL_NODE] = \
          [node_name
           for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
           for node_name in self.cfg.GetNodeGroup(group_uuid).members]
      else:
        assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

        self._LockInstancesNodes()

    elif level == locking.LEVEL_NODE_RES:
      # Reuse node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.needed_locks[locking.LEVEL_NODE]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": instance.secondary_nodes[0],
      }
    env.update(BuildInstanceHookEnvByObject(self, instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self.replacer.instance
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node is not None:
      nl.append(self.op.remote_node)
    return nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
            self.op.iallocator is None)

    # Verify if node group locks are still correct
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)

    return LogicalUnit.CheckPrereq(self)


class LUInstanceActivateDisks(NoHooksLU):
  """Bring up an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Activate the disks.

    """
    disks_ok, disks_info = \
              AssembleInstanceDisks(self, self.instance,
                                    ignore_size=self.op.ignore_size)
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block devices")

    if self.op.wait_for_sync:
      if not WaitForSync(self, self.instance):
        raise errors.OpExecError("Some disks of the instance are degraded!")

    return disks_info


class LUInstanceDeactivateDisks(NoHooksLU):
  """Shutdown an instance's disks.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Deactivate the disks
1718 763ad5be Thomas Thrainer

1719 763ad5be Thomas Thrainer
    """
1720 763ad5be Thomas Thrainer
    instance = self.instance
1721 763ad5be Thomas Thrainer
    if self.op.force:
1722 5eacbcae Thomas Thrainer
      ShutdownInstanceDisks(self, instance)
1723 763ad5be Thomas Thrainer
    else:
1724 763ad5be Thomas Thrainer
      _SafeShutdownInstanceDisks(self, instance)
1725 763ad5be Thomas Thrainer
1726 763ad5be Thomas Thrainer
1727 763ad5be Thomas Thrainer
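
  # Note: ShutdownInstanceDisks (the forced path above) tears the disks down
  # unconditionally, while _SafeShutdownInstanceDisks first checks that the
  # instance is down (via its INSTANCE_DOWN state check), so only the forced
  # path can pull disks out from under a running instance.
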
def _CheckDiskConsistencyInner(lu, instance, dev, node, on_primary,
                               ldisk=False):
  """Check that mirrors are not degraded.

  @attention: The device has to be annotated already.

  The ldisk parameter, if True, will change the test from the
  is_degraded attribute (which represents overall non-ok status for
  the device(s)) to the ldisk (representing the local storage status).

  """
  lu.cfg.SetDiskID(dev, node)

  result = True

  if on_primary or dev.AssembleOnSecondary():
    rstats = lu.rpc.call_blockdev_find(node, dev)
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't find disk on node %s: %s", node, msg)
      result = False
    elif not rstats.payload:
      lu.LogWarning("Can't find disk on node %s", node)
      result = False
    else:
      if ldisk:
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
      else:
        result = result and not rstats.payload.is_degraded

  if dev.children:
    for child in dev.children:
      result = result and _CheckDiskConsistencyInner(lu, instance, child, node,
                                                     on_primary)

  return result


def CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
  """Wrapper around L{_CheckDiskConsistencyInner}.

  """
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
  return _CheckDiskConsistencyInner(lu, instance, disk, node, on_primary,
                                    ldisk=ldisk)
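

# Example (illustrative): callers that only care about local storage health,
# e.g. consistency checks before a migration, pass ldisk=True so that a
# temporarily disconnected but locally healthy DRBD device still counts as
# consistent:
#
#   CheckDiskConsistency(lu, instance, disk, instance.primary_node,
#                        on_primary=True, ldisk=True)
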
def _BlockdevFind(lu, node, dev, instance):
  """Wrapper around call_blockdev_find to annotate diskparams.

  @param lu: A reference to the lu object
  @param node: The node to call out
  @param dev: The device to find
  @param instance: The instance object the device belongs to
  @returns The result of the rpc call

  """
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
  return lu.rpc.call_blockdev_find(node, disk)


def _GenerateUniqueNames(lu, exts):
  """Generate suitable LV names.

  This will generate one unique logical volume name for each of the
  given suffixes.

  """
  results = []
  for val in exts:
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
    results.append("%s%s" % (new_id, val))
  return results
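

# Example (illustrative, UUIDs made up): _CreateNewStorage below requests the
# suffixes [".disk0_data", ".disk0_meta"] for disk 0, and each suffix gets its
# own freshly generated unique ID:
#
#   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
#   => ["4e2c9d6a-1f3b-4c8e-9a57-0123456789ab.disk0_data",
#       "9d1740f2-6c2b-47a9-8e11-ba9876543210.disk0_meta"]
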
class TLReplaceDisks(Tasklet):
  """Replaces disks for an instance.

  Note: Locking is not within the scope of this class.

  """
  def __init__(self, lu, instance_name, mode, iallocator_name, remote_node,
               disks, early_release, ignore_ipolicy):
    """Initializes this class.

    """
    Tasklet.__init__(self, lu)

    # Parameters
    self.instance_name = instance_name
    self.mode = mode
    self.iallocator_name = iallocator_name
    self.remote_node = remote_node
    self.disks = disks
    self.early_release = early_release
    self.ignore_ipolicy = ignore_ipolicy

    # Runtime data
    self.instance = None
    self.new_node = None
    self.target_node = None
    self.other_node = None
    self.remote_node_info = None
    self.node_secondary_ip = None

  @staticmethod
  def _RunAllocator(lu, iallocator_name, instance_name, relocate_from):
    """Compute a new secondary node using an IAllocator.

    """
    req = iallocator.IAReqRelocate(name=instance_name,
                                   relocate_from=list(relocate_from))
    ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)

    ial.Run(iallocator_name)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (iallocator_name, ial.info),
                                 errors.ECODE_NORES)

    remote_node_name = ial.result[0]

    lu.LogInfo("Selected new secondary for instance '%s': %s",
               instance_name, remote_node_name)

    return remote_node_name
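
  # Note: for an IAReqRelocate request the allocator returns a list of node
  # names; a DRBD8 instance has exactly one secondary to replace, hence only
  # result[0] is used above.
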
  def _FindFaultyDisks(self, node_name):
    """Wrapper for L{FindFaultyInstanceDisks}.

    """
    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
                                   node_name, True)

  def _CheckDisksActivated(self, instance):
    """Checks if the instance disks are activated.

    @param instance: The instance to check disks
    @return: True if they are activated, False otherwise

    """
    nodes = instance.all_nodes

    for idx, dev in enumerate(instance.disks):
      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
        self.cfg.SetDiskID(dev, node)

        result = _BlockdevFind(self, node, dev, instance)

        if result.offline:
          continue
        elif result.fail_msg or not result.payload:
          return False

    return True

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.instance_name

    if instance.disk_template != constants.DT_DRBD8:
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
                                 " instances", errors.ECODE_INVAL)

    if len(instance.secondary_nodes) != 1:
      raise errors.OpPrereqError("The instance has a strange layout,"
                                 " expected one secondary but found %d" %
                                 len(instance.secondary_nodes),
                                 errors.ECODE_FAULT)

    instance = self.instance
    secondary_node = instance.secondary_nodes[0]

    if self.iallocator_name is None:
      remote_node = self.remote_node
    else:
      remote_node = self._RunAllocator(self.lu, self.iallocator_name,
                                       instance.name, instance.secondary_nodes)

    if remote_node is None:
      self.remote_node_info = None
    else:
      assert remote_node in self.lu.owned_locks(locking.LEVEL_NODE), \
             "Remote node '%s' is not locked" % remote_node

      self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
      assert self.remote_node_info is not None, \
        "Cannot retrieve locked node %s" % remote_node

    if remote_node == self.instance.primary_node:
      raise errors.OpPrereqError("The specified node is the primary node of"
                                 " the instance", errors.ECODE_INVAL)

    if remote_node == secondary_node:
      raise errors.OpPrereqError("The specified node is already the"
                                 " secondary node of the instance",
                                 errors.ECODE_INVAL)

    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
                                    constants.REPLACE_DISK_CHG):
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
                                 errors.ECODE_INVAL)

    if self.mode == constants.REPLACE_DISK_AUTO:
      if not self._CheckDisksActivated(instance):
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
                                   " first" % self.instance_name,
                                   errors.ECODE_STATE)
      faulty_primary = self._FindFaultyDisks(instance.primary_node)
      faulty_secondary = self._FindFaultyDisks(secondary_node)

      if faulty_primary and faulty_secondary:
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
                                   " one node and cannot be repaired"
                                   " automatically" % self.instance_name,
                                   errors.ECODE_STATE)

      if faulty_primary:
        self.disks = faulty_primary
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]
      elif faulty_secondary:
        self.disks = faulty_secondary
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]
      else:
        self.disks = []
        check_nodes = []

    else:
      # Non-automatic modes
      if self.mode == constants.REPLACE_DISK_PRI:
        self.target_node = instance.primary_node
        self.other_node = secondary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_SEC:
        self.target_node = secondary_node
        self.other_node = instance.primary_node
        check_nodes = [self.target_node, self.other_node]

      elif self.mode == constants.REPLACE_DISK_CHG:
        self.new_node = remote_node
        self.other_node = instance.primary_node
        self.target_node = secondary_node
        check_nodes = [self.new_node, self.other_node]

        CheckNodeNotDrained(self.lu, remote_node)
        CheckNodeVmCapable(self.lu, remote_node)

        old_node_info = self.cfg.GetNodeInfo(secondary_node)
        assert old_node_info is not None
        if old_node_info.offline and not self.early_release:
          # doesn't make sense to delay the release
          self.early_release = True
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
                          " early-release mode", secondary_node)

      else:
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
                                     self.mode)

      # If not specified all disks should be replaced
      if not self.disks:
        self.disks = range(len(self.instance.disks))

    # TODO: This is ugly, but right now we can't distinguish between an
    # internally submitted opcode and an external one. We should fix that.
    if self.remote_node_info:
      # We change the node; let's verify it still meets instance policy
      new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
      cluster = self.cfg.GetClusterInfo()
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                              new_group_info)
      CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
                             self.cfg, ignore=self.ignore_ipolicy)

    for node in check_nodes:
      CheckNodeOnline(self.lu, node)

    touched_nodes = frozenset(node_name for node_name in [self.new_node,
                                                          self.other_node,
                                                          self.target_node]
                              if node_name is not None)

    # Release unneeded node and node resource locks
    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
    ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)

    # Release any owned node group
    ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)

    # Check whether disks are valid
    for disk_idx in self.disks:
      instance.FindDisk(disk_idx)

    # Get secondary node IP addresses
    self.node_secondary_ip = dict((name, node.secondary_ip) for (name, node)
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
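
  # Role summary (illustrative) of what CheckPrereq establishes per mode:
  #
  #   REPLACE_DISK_PRI:  target_node=primary,   other_node=secondary
  #   REPLACE_DISK_SEC:  target_node=secondary, other_node=primary
  #   REPLACE_DISK_CHG:  target_node=secondary, other_node=primary,
  #                      new_node=remote_node
  #   REPLACE_DISK_AUTO: target_node=faulty side, other_node=healthy side
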
  def Exec(self, feedback_fn):
    """Execute disk replacement.

    This dispatches the disk replacement to the appropriate handler.

    """
    if __debug__:
      # Verify owned locks before starting operation
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
      assert set(owned_nodes) == set(self.node_secondary_ip), \
          ("Incorrect node locks, owning %s, expected %s" %
           (owned_nodes, self.node_secondary_ip.keys()))
      assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
              self.lu.owned_locks(locking.LEVEL_NODE_RES))
      assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)

      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
      assert list(owned_instances) == [self.instance_name], \
          "Instance '%s' not locked" % self.instance_name

      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
          "Should not own any node group lock at this point"

    if not self.disks:
      feedback_fn("No disks need replacement for instance '%s'" %
                  self.instance.name)
      return

    feedback_fn("Replacing disk(s) %s for instance '%s'" %
                (utils.CommaJoin(self.disks), self.instance.name))
    feedback_fn("Current primary node: %s" % self.instance.primary_node)
    feedback_fn("Current secondary node: %s" %
                utils.CommaJoin(self.instance.secondary_nodes))

    activate_disks = (self.instance.admin_state != constants.ADMINST_UP)

    # Activate the instance disks if we're replacing them on a down instance
    if activate_disks:
      StartInstanceDisks(self.lu, self.instance, True)

    try:
      # Should we replace the secondary node?
      if self.new_node is not None:
        fn = self._ExecDrbd8Secondary
      else:
        fn = self._ExecDrbd8DiskOnly

      result = fn(feedback_fn)
    finally:
      # Deactivate the instance disks if we're replacing them on a
      # down instance
      if activate_disks:
        _SafeShutdownInstanceDisks(self.lu, self.instance)

    assert not self.lu.owned_locks(locking.LEVEL_NODE)

    if __debug__:
      # Verify owned locks
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
      nodes = frozenset(self.node_secondary_ip)
      assert ((self.early_release and not owned_nodes) or
              (not self.early_release and not (set(owned_nodes) - nodes))), \
        ("Not owning the correct locks, early_release=%s, owned=%r,"
         " nodes=%r" % (self.early_release, owned_nodes, nodes))

    return result
  def _CheckVolumeGroup(self, nodes):
    self.lu.LogInfo("Checking volume groups")

    vgname = self.cfg.GetVGName()

    # Make sure volume group exists on all involved nodes
    results = self.rpc.call_vg_list(nodes)
    if not results:
      raise errors.OpExecError("Can't list volume groups on the nodes")

    for node in nodes:
      res = results[node]
      res.Raise("Error checking node %s" % node)
      if vgname not in res.payload:
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
                                 (vgname, node))
  def _CheckDisksExistence(self, nodes):
    # Check disk existence
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      for node in nodes:
        self.lu.LogInfo("Checking disk/%d on %s", idx, node)
        self.cfg.SetDiskID(dev, node)

        result = _BlockdevFind(self, node, dev, self.instance)

        msg = result.fail_msg
        if msg or not result.payload:
          if not msg:
            msg = "disk not found"
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
                                   (idx, node, msg))

  def _CheckDisksConsistency(self, node_name, on_primary, ldisk):
    for idx, dev in enumerate(self.instance.disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                      (idx, node_name))

      if not CheckDiskConsistency(self.lu, self.instance, dev, node_name,
                                  on_primary, ldisk=ldisk):
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                 " replace disks for instance %s" %
                                 (node_name, self.instance.name))
  def _CreateNewStorage(self, node_name):
    """Create new storage on the primary or secondary node.

    This is only used for same-node replaces, not for changing the
    secondary node, hence we don't want to modify the existing disk.

    """
    iv_names = {}

    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    for idx, dev in enumerate(disks):
      if idx not in self.disks:
        continue

      self.lu.LogInfo("Adding storage on %s for disk/%d", node_name, idx)

      self.cfg.SetDiskID(dev, node_name)

      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
      names = _GenerateUniqueNames(self.lu, lv_names)

      (data_disk, meta_disk) = dev.children
      vg_data = data_disk.logical_id[0]
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
                             logical_id=(vg_data, names[0]),
                             params=data_disk.params)
      vg_meta = meta_disk.logical_id[0]
      lv_meta = objects.Disk(dev_type=constants.LD_LV,
                             size=constants.DRBD_META_SIZE,
                             logical_id=(vg_meta, names[1]),
                             params=meta_disk.params)

      new_lvs = [lv_data, lv_meta]
      old_lvs = [child.Copy() for child in dev.children]
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
      excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)

      # we pass force_create=True to force the LVM creation
      for new_lv in new_lvs:
        _CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
                             GetInstanceInfoText(self.instance), False,
                             excl_stor)

    return iv_names
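
  # Note: iv_names maps each DRBD device's iv_name (e.g. "disk/0") to a
  # (drbd_dev, old_lvs, new_lvs) triple; _CheckDevices and _RemoveOldStorage
  # below consume exactly this shape.
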
  def _CheckDevices(self, node_name, iv_names):
    for name, (dev, _, _) in iv_names.iteritems():
      self.cfg.SetDiskID(dev, node_name)

      result = _BlockdevFind(self, node_name, dev, self.instance)

      msg = result.fail_msg
      if msg or not result.payload:
        if not msg:
          msg = "disk not found"
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
                                 (name, msg))

      if result.payload.is_degraded:
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
  def _RemoveOldStorage(self, node_name, iv_names):
    for name, (_, old_lvs, _) in iv_names.iteritems():
      self.lu.LogInfo("Remove logical volumes for %s", name)

      for lv in old_lvs:
        self.cfg.SetDiskID(lv, node_name)

        msg = self.rpc.call_blockdev_remove(node_name, lv).fail_msg
        if msg:
          self.lu.LogWarning("Can't remove old LV: %s", msg,
                             hint="remove unused LVs manually")
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
    """Replace a disk on the primary or secondary for DRBD 8.

    The algorithm for replace is quite complicated:

      1. for each disk to be replaced:

        1. create new LVs on the target node with unique names
        1. detach old LVs from the drbd device
        1. rename old LVs to name_replaced.<time_t>
        1. rename new LVs to old LVs
        1. attach the new LVs (with the old names now) to the drbd device

      1. wait for sync across all devices

      1. for each modified disk:

        1. remove old LVs (which have the name name_replaced.<time_t>)

    Failures are not very well handled.

    """
    steps_total = 6

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.other_node, self.target_node])
    self._CheckVolumeGroup([self.target_node, self.other_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.other_node,
                                self.other_node == self.instance.primary_node,
                                False)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    iv_names = self._CreateNewStorage(self.target_node)

    # Step: for each lv, detach+rename*2+attach
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    for dev, old_lvs, new_lvs in iv_names.itervalues():
      self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)

      result = self.rpc.call_blockdev_removechildren(self.target_node, dev,
                                                     old_lvs)
      result.Raise("Can't detach drbd from local storage on node"
                   " %s for device %s" % (self.target_node, dev.iv_name))
      #dev.children = []
      #cfg.Update(instance)

      # ok, we created the new LVs, so now we know we have the needed
      # storage; as such, we proceed on the target node to rename
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
      # using the assumption that logical_id == physical_id (which in
      # turn is the unique_id on that node)

      # FIXME(iustin): use a better name for the replaced LVs
      temp_suffix = int(time.time())
      ren_fn = lambda d, suff: (d.physical_id[0],
                                d.physical_id[1] + "_replaced-%s" % suff)

      # Build the rename list based on what LVs exist on the node
      rename_old_to_new = []
      for to_ren in old_lvs:
        result = self.rpc.call_blockdev_find(self.target_node, to_ren)
        if not result.fail_msg and result.payload:
          # device exists
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))

      self.lu.LogInfo("Renaming the old LVs on the target node")
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_old_to_new)
      result.Raise("Can't rename old LVs on node %s" % self.target_node)

      # Now we rename the new LVs to the old LVs
      self.lu.LogInfo("Renaming the new LVs on the target node")
      rename_new_to_old = [(new, old.physical_id)
                           for old, new in zip(old_lvs, new_lvs)]
      result = self.rpc.call_blockdev_rename(self.target_node,
                                             rename_new_to_old)
      result.Raise("Can't rename new LVs on node %s" % self.target_node)

      # Intermediate steps of in memory modifications
      for old, new in zip(old_lvs, new_lvs):
        new.logical_id = old.logical_id
        self.cfg.SetDiskID(new, self.target_node)

      # We need to modify old_lvs so that removal later removes the
      # right LVs, not the newly added ones; note that old_lvs is a
      # copy here
      for disk in old_lvs:
        disk.logical_id = ren_fn(disk, temp_suffix)
        self.cfg.SetDiskID(disk, self.target_node)

      # Now that the new lvs have the old name, we can add them to the device
      self.lu.LogInfo("Adding new mirror component on %s", self.target_node)
      result = self.rpc.call_blockdev_addchildren(self.target_node,
                                                  (dev, self.instance), new_lvs)
      msg = result.fail_msg
      if msg:
        for new_lv in new_lvs:
          msg2 = self.rpc.call_blockdev_remove(self.target_node,
                                               new_lv).fail_msg
          if msg2:
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
                               hint=("cleanup manually the unused logical"
                                     " volumes"))
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)

    cstep = itertools.count(5)

    if self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)
      # TODO: Check if releasing locks early still makes sense
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
    else:
      # Release all resource locks except those used by the instance
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
                   keep=self.node_secondary_ip.keys())

    # Release all node locks while waiting for sync
    ReleaseLocks(self.lu, locking.LEVEL_NODE)

    # TODO: Can the instance lock be downgraded here? Take the optional disk
    # shutdown in the caller into consideration.

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
    WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)
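
  # The detach/rename/attach dance above, in miniature (illustrative only,
  # names made up):
  #
  #   before: drbd "disk/0" uses old LV <uuid1>.disk0_data (meta analogous)
  #   1. create new LV <uuid2>.disk0_data
  #   2. detach the old LVs from the drbd device
  #   3. rename <uuid1>.disk0_data -> <uuid1>.disk0_data_replaced-<time_t>
  #   4. rename <uuid2>.disk0_data -> <uuid1>.disk0_data
  #   5. attach the renamed new LVs, let DRBD resync, then remove the
  #      "_replaced-<time_t>" LVs
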
  def _ExecDrbd8Secondary(self, feedback_fn):
    """Replace the secondary node for DRBD 8.

    The algorithm for replace is quite complicated:
      - for all disks of the instance:
        - create new LVs on the new node with same names
        - shutdown the drbd device on the old secondary
        - disconnect the drbd network on the primary
        - create the drbd device on the new secondary
        - network attach the drbd on the primary, using an artifice:
          the drbd code for Attach() will connect to the network if it
          finds a device which is connected to the good local disks but
          not network enabled
      - wait for sync across all devices
      - remove all disks from the old secondary

    Failures are not very well handled.

    """
    steps_total = 6

    pnode = self.instance.primary_node

    # Step: check device activation
    self.lu.LogStep(1, steps_total, "Check device existence")
    self._CheckDisksExistence([self.instance.primary_node])
    self._CheckVolumeGroup([self.instance.primary_node])

    # Step: check other node consistency
    self.lu.LogStep(2, steps_total, "Check peer consistency")
    self._CheckDisksConsistency(self.instance.primary_node, True, True)

    # Step: create new storage
    self.lu.LogStep(3, steps_total, "Allocate new storage")
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
    excl_stor = IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
    for idx, dev in enumerate(disks):
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
                      (self.new_node, idx))
      # we pass force_create=True to force LVM creation
      for new_lv in dev.children:
        _CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
                             True, GetInstanceInfoText(self.instance), False,
                             excl_stor)

    # Step 4: drbd minors and drbd setups changes
    # after this, we must manually remove the drbd minors on both the
    # error and the success paths
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
    minors = self.cfg.AllocateDRBDMinor([self.new_node
                                         for dev in self.instance.disks],
                                        self.instance.name)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.new_node, idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size,
                              params={})
      (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
                                            self.cfg)
      try:
        CreateSingleBlockDev(self.lu, self.new_node, self.instance,
                             anno_new_drbd,
                             GetInstanceInfoText(self.instance), False,
                             excl_stor)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.name)
        raise
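
    # Example (illustrative, made-up values): if the old logical_id was
    #   ("node1", "node2", 11000, 0, 1, "secret")
    # with "node1" as primary and "node3" as the new secondary getting
    # minor 2, the two IDs built in the loop above are
    #   new_alone_id = ("node1", "node3", None,  0, 2, "secret")
    #   new_net_id   = ("node1", "node3", 11000, 0, 2, "secret")
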
    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
      self.cfg.SetDiskID(dev, self.target_node)
      msg = self.rpc.call_blockdev_shutdown(self.target_node,
                                            (dev, self.instance)).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
                                               self.instance.disks)[pnode]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.name)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # Release all node locks (the configuration has been updated)
    ReleaseLocks(self.lu, locking.LEVEL_NODE)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node],
                                           self.node_secondary_ip,
                                           (self.instance.disks, self.instance),
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           to_node, msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    cstep = itertools.count(5)

    if self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)
      # TODO: Check if releasing locks early still makes sense
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
    else:
      # Release all resource locks except those used by the instance
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
                   keep=self.node_secondary_ip.keys())

    # TODO: Can the instance lock be downgraded here? Take the optional disk
    # shutdown in the caller into consideration.

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
    WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node, iv_names)