# lib/cmdlib/instance_storage.py @ 70b634e6
#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with storage of instances."""

import itertools
import logging
import os
import time

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
from ganeti import rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
  CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
  CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy

import ganeti.masterd.instance


_DISK_TEMPLATE_NAME_PREFIX = {
  constants.DT_PLAIN: "",
  constants.DT_RBD: ".rbd",
  constants.DT_EXT: ".ext",
  }


def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  """
  result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
                                       device.size, instance.name, force_open,
                                       info, excl_stor)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device,
                                             lu.cfg.GetNodeName(node_uuid),
                                             instance.name))


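# Illustrative sketch (editor's addition, not part of the original module):
# CreateSingleBlockDev is normally reached via _CreateBlockDev ->
# _CreateBlockDevInner, which annotate the disk and compute excl_stor first.
# A direct call would look roughly like this, assuming "annotated_disk" has
# already been through AnnotateDiskParams and its children already exist on
# the node:
#
#   excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
#   CreateSingleBlockDev(lu, node_uuid, instance, annotated_disk,
#                        GetInstanceInfoText(instance), True, excl_stor)
#
# The helper creates exactly one device and raises via result.Raise on
# failure.
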
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
                         info, force_open, excl_stor):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @attention: The device has to be annotated already.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device for which
      CreateOnSecondary() is true
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  @return: list of created devices
  """
  created_devices = []
  try:
    if device.CreateOnSecondary():
      force_create = True

    if device.children:
      for child in device.children:
        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
                                    force_create, info, force_open, excl_stor)
        created_devices.extend(devs)

    if not force_create:
      return created_devices

    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor)
    # The device has been completely created, so there is no point in keeping
    # its subdevices in the list. We just add the device itself instead.
    created_devices = [(node_uuid, device)]
    return created_devices

  except errors.DeviceCreationError, e:
    e.created_devices.extend(created_devices)
    raise e
  except errors.OpExecError, e:
    raise errors.DeviceCreationError(str(e), created_devices)


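# Illustrative note (editor's addition, not part of the original module): the
# return value has the same shape as the "disks_created" lists used by
# CreateDisks and _UndoCreateDisks, i.e. a list of (node_uuid, device) pairs.
# Because a fully created device replaces its children in the list, a
# successful call for one annotated disk returns
#
#   [(node_uuid, device)]
#
# while a partial failure surfaces as errors.DeviceCreationError carrying the
# devices that had been created before the failure.
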
def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node_uuid: string
  @param node_uuid: The node UUID
  @rtype: bool
  @return: The effective value of exclusive_storage
  @raise errors.OpPrereqError: if no node exists with the given UUID

  """
  ni = cfg.GetNodeInfo(node_uuid)
  if ni is None:
    raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
                               errors.ECODE_NOENT)
  return IsExclusiveStorageEnabledNode(cfg, ni)


def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
                    force_open):
  """Wrapper around L{_CreateBlockDevInner}.

  This method annotates the root device first.

  """
  (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
  excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
  return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
                              force_open, excl_stor)


def _UndoCreateDisks(lu, disks_created, instance):
  """Undo the work performed by L{CreateDisks}.

  This function is called in case of an error to undo the work of
  L{CreateDisks}.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param disks_created: the result returned by L{CreateDisks}
  @type instance: L{objects.Instance}
  @param instance: the instance for which disks were created

  """
  for (node_uuid, disk) in disks_created:
    result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
    result.Warn("Failed to remove newly-created disk %s on node %s" %
                (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)


def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node_uuid: string
  @param target_node_uuid: if passed, overrides the target node for creation
  @type disks: list of L{objects.Disk}
  @param disks: the disks to create; if not specified, all the disks of the
      instance are created
  @return: information about the created disks, to be used to call
      L{_UndoCreateDisks}
  @raise errors.OpPrereqError: in case of error

  """
  info = GetInstanceInfoText(instance)
  if target_node_uuid is None:
    pnode_uuid = instance.primary_node
    all_node_uuids = instance.all_nodes
  else:
    pnode_uuid = target_node_uuid
    all_node_uuids = [pnode_uuid]

  if disks is None:
    disks = instance.disks

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir,
                               lu.cfg.GetNodeName(pnode_uuid)))

  disks_created = []
  for idx, device in enumerate(disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
    for node_uuid in all_node_uuids:
      f_create = node_uuid == pnode_uuid
      try:
        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
                        f_create)
        disks_created.append((node_uuid, device))
      except errors.DeviceCreationError, e:
        logging.warning("Creating disk %s for instance '%s' failed",
                        idx, instance.name)
        disks_created.extend(e.created_devices)
        _UndoCreateDisks(lu, disks_created, instance)
        raise errors.OpExecError(e.message)
  return disks_created


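# Illustrative sketch (editor's addition, not part of the original module):
# callers pair CreateDisks with _UndoCreateDisks so that a failure later in
# the setup does not leave half-created disks behind, roughly:
#
#   disks_created = CreateDisks(self, self.instance)
#   try:
#     ...  # e.g. wipe the disks or continue instance creation
#   except errors.OpExecError:
#     _UndoCreateDisks(self, disks_created, self.instance)
#     raise
#
# WipeOrCleanupDisks (used by LUInstanceRecreateDisks.Exec below) follows this
# pattern through its "cleanup" argument.
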
def ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group

  """
  def _compute(disks, payload):
    """Universal algorithm.

    """
    vgs = {}
    for disk in disks:
      vgs[disk[constants.IDISK_VG]] = \
        vgs.get(disk[constants.IDISK_VG], 0) + disk[constants.IDISK_SIZE] + \
        payload

    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


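# Worked example (editor's addition, not part of the original module): for two
# 10 GiB disks on volume group "xenvg"
#
#   disks = [{constants.IDISK_VG: "xenvg", constants.IDISK_SIZE: 10240},
#            {constants.IDISK_VG: "xenvg", constants.IDISK_SIZE: 10240}]
#
# ComputeDiskSizePerVG(constants.DT_PLAIN, disks) yields {"xenvg": 20480},
# while ComputeDiskSizePerVG(constants.DT_DRBD8, disks) additionally reserves
# constants.DRBD_META_SIZE per disk, i.e. {"xenvg": 20480 + 2 * 128} with the
# default 128 MB metadata size.  File-based and diskless templates need no
# volume group space and return {}.
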
def ComputeDisks(op, default_vg):
  """Computes the instance disks.

  @param op: The instance opcode
  @param default_vg: The default_vg to assume

  @return: The computed disks

  """
  disks = []
  for disk in op.disks:
    mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
    if mode not in constants.DISK_ACCESS_SET:
      raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                 mode, errors.ECODE_INVAL)
    size = disk.get(constants.IDISK_SIZE, None)
    if size is None:
      raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
    try:
      size = int(size)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                 errors.ECODE_INVAL)

    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
    if ext_provider and op.disk_template != constants.DT_EXT:
      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
                                 " disk template, not %s" %
                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
                                  op.disk_template), errors.ECODE_INVAL)

    data_vg = disk.get(constants.IDISK_VG, default_vg)
    name = disk.get(constants.IDISK_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    new_disk = {
      constants.IDISK_SIZE: size,
      constants.IDISK_MODE: mode,
      constants.IDISK_VG: data_vg,
      constants.IDISK_NAME: name,
      }

    for key in [
      constants.IDISK_METAVG,
      constants.IDISK_ADOPT,
      constants.IDISK_SPINDLES,
      ]:
      if key in disk:
        new_disk[key] = disk[key]

    # For extstorage, demand the `provider' option and add any
    # additional parameters (ext-params) to the dict
    if op.disk_template == constants.DT_EXT:
      if ext_provider:
        new_disk[constants.IDISK_PROVIDER] = ext_provider
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            new_disk[key] = disk[key]
      else:
        raise errors.OpPrereqError("Missing provider for template '%s'" %
                                   constants.DT_EXT, errors.ECODE_INVAL)

    disks.append(new_disk)

  return disks


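# Illustrative example (editor's addition, not part of the original module):
# for an opcode whose op.disks contains one entry
#
#   {constants.IDISK_SIZE: 10240, constants.IDISK_MODE: constants.DISK_RDWR}
#
# ComputeDisks(op, "xenvg") returns
#
#   [{constants.IDISK_SIZE: 10240, constants.IDISK_MODE: constants.DISK_RDWR,
#     constants.IDISK_VG: "xenvg", constants.IDISK_NAME: None}]
#
# i.e. the default volume group is filled in, and the optional metavg/adopt/
# spindles/provider keys are only copied when present in the input.
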
def CheckRADOSFreeSpace():
  """Compute disk size requirements inside the RADOS cluster.

  """
  # For the RADOS cluster we assume there is always enough space.
  pass


def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())

  dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
                          logical_id=(vgnames[0], names[0]),
                          params={})
  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
                          size=constants.DRBD_META_SIZE,
                          logical_id=(vgnames[1], names[1]),
                          params={})
  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
                          logical_id=(primary_uuid, secondary_uuid, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name, params={})
  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  return drbd_dev


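# Illustrative sketch (editor's addition, not part of the original module) of
# the device tree returned for one disk of <size> MB:
#
#   objects.Disk(dev_type=constants.DT_DRBD8, size=<size>,
#                logical_id=(primary_uuid, secondary_uuid, port,
#                            p_minor, s_minor, shared_secret),
#                children=[<data LV of <size> MB on vgnames[0]>,
#                          <meta LV of DRBD_META_SIZE on vgnames[1]>])
#
# The port, the shared secret and the three device UUIDs are freshly allocated
# from the cluster configuration on every call.
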
def GenerateDiskTemplate(
  lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
  disk_info, file_storage_dir, file_driver, base_index,
  feedback_fn, full_disk_params):
  """Generate the entire disk layout for a given template type.

  """
  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_node_uuids) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node_uuid = secondary_node_uuids[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disks.append(disk_dev)
  else:
    if secondary_node_uuids:
      raise errors.ProgrammerError("Wrong template configuration")

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    if template_name == constants.DT_PLAIN:

      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])

    elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/disk%d" % (file_storage_dir,
                                                    disk_index))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    elif template_name == constants.DT_EXT:
      def logical_id_fn(idx, _, disk):
        provider = disk.get(constants.IDISK_PROVIDER, None)
        if provider is None:
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
                                       " not found" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER))
        return (provider, names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = template_name

    for idx, disk in enumerate(disk_info):
      params = {}
      # Only for the Ext template, add disk_info to params
      if template_name == constants.DT_EXT:
        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            params[key] = disk[key]
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
                              logical_id=logical_id_fn(idx, disk_index, disk),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE],
                              params=params,
                              spindles=disk.get(constants.IDISK_SPINDLES))
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
      disks.append(disk_dev)

  return disks


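# Quick reference (editor's addition, not part of the original module) for the
# logical_id assigned per template by the code above:
#
#   DT_PLAIN        -> (vg_name, lv_name)
#   DT_FILE /
#   DT_SHARED_FILE  -> (file_driver, "<file_storage_dir>/disk<index>")
#   DT_BLOCK        -> (constants.BLOCKDEV_DRIVER_MANUAL, <adopted device>)
#   DT_RBD          -> ("rbd", <generated name>)
#   DT_EXT          -> (<provider>, <generated name>)
#   DT_DRBD8        -> built by _GenerateDRBD8Branch (nodes, port, minors,
#                      shared secret)
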
def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
  """Check the presence of the spindle options with exclusive_storage.

  @type diskdict: dict
  @param diskdict: disk parameters
  @type es_flag: bool
  @param es_flag: the effective value of the exclusive_storage flag
  @type required: bool
  @param required: whether spindles are required or just optional
  @raise errors.OpPrereqError: when spindles are given and they should not be

  """
  if (not es_flag and constants.IDISK_SPINDLES in diskdict and
      diskdict[constants.IDISK_SPINDLES] is not None):
    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
                               " when exclusive storage is not active",
                               errors.ECODE_INVAL)
  if (es_flag and required and (constants.IDISK_SPINDLES not in diskdict or
                                diskdict[constants.IDISK_SPINDLES] is None)):
    raise errors.OpPrereqError("You must specify spindles in instance disks"
                               " when exclusive storage is active",
                               errors.ECODE_INVAL)


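# Summary (editor's addition, not part of the original module) of the checks
# above; "spindles" means a non-None IDISK_SPINDLES entry in diskdict:
#
#   es_flag  required  spindles  result
#   False    any       given     OpPrereqError
#   False    any       absent    OK
#   True     True      absent    OpPrereqError
#   True     any       given     OK
#   True     False     absent    OK
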
class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  _MODIFYABLE = compat.UniqueFrozenset([
    constants.IDISK_SIZE,
    constants.IDISK_MODE,
    constants.IDISK_SPINDLES,
    ])

  # New or changed disk parameters may have different semantics
  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
    constants.IDISK_ADOPT,

    # TODO: Implement support for changing the VG while recreating
    constants.IDISK_VG,
    constants.IDISK_METAVG,
    constants.IDISK_PROVIDER,
    constants.IDISK_NAME,
    ]))

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    be_full = self.cfg.GetClusterInfo().FillBE(self.instance)

    # FIXME
    # The allocator should actually run in "relocate" mode, but current
    # allocators don't support relocating all the nodes of an instance at
    # the same time. As a workaround we use "allocate" mode, but this is
    # suboptimal for two reasons:
    # - The instance name passed to the allocator is present in the list of
    #   existing instances, so there could be a conflict within the
    #   internal structures of the allocator. This doesn't happen with the
    #   current allocators, but it's a liability.
    # - The allocator counts the resources used by the instance twice: once
    #   because the instance exists already, and once because it tries to
    #   allocate a new instance.
    # The allocator could choose some of the nodes on which the instance is
    # running, but that's not a problem. If the instance nodes are broken,
    # they should already be marked as drained or offline, and hence
    # skipped by the allocator. If instance disks have been lost for other
    # reasons, then recreating the disks on the same nodes should be fine.
    disk_template = self.instance.disk_template
    spindle_use = be_full[constants.BE_SPINDLE_USE]
    disks = [{
      constants.IDISK_SIZE: d.size,
      constants.IDISK_MODE: d.mode,
      constants.IDISK_SPINDLES: d.spindles,
      } for d in self.instance.disks]
    req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                        disk_template=disk_template,
                                        tags=list(self.instance.GetTags()),
                                        os=self.instance.os,
                                        nics=[{}],
                                        vcpus=be_full[constants.BE_VCPUS],
                                        memory=be_full[constants.BE_MAXMEM],
                                        spindle_use=spindle_use,
                                        disks=disks,
                                        hypervisor=self.instance.hypervisor,
                                        node_whitelist=None)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    assert req.RequiredNodes() == len(self.instance.all_nodes)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(self.op.nodes))

  def CheckArguments(self):
    if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
      # Normalize and convert deprecated list of disk indices
      self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]

    duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
    if duplicates:
      raise errors.OpPrereqError("Some disks have been specified more than"
                                 " once: %s" % utils.CommaJoin(duplicates),
                                 errors.ECODE_INVAL)

    # We don't want CheckIAllocatorOrNode selecting the default iallocator
    # when neither iallocator nor nodes are specified
    if self.op.iallocator or self.op.nodes:
      CheckIAllocatorOrNode(self, "iallocator", "nodes")

    for (idx, params) in self.op.disks:
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
      unsupported = frozenset(params.keys()) - self._MODIFYABLE
      if unsupported:
        raise errors.OpPrereqError("Parameters for disk %s try to change"
                                   " unmodifiable parameter(s): %s" %
                                   (idx, utils.CommaJoin(unsupported)),
                                   errors.ECODE_INVAL)

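  # Illustrative example (editor's addition, not part of the original module):
  # the deprecated opcode form "disks=[2, 0]" is normalized by CheckArguments
  # above to "disks=[(0, {}), (2, {})]", i.e. a sorted list of (index, params)
  # pairs with empty parameter dicts, which is what the rest of the LU expects.
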
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    if self.op.nodes:
      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      if self.op.iallocator:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE_RES] = []

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.iallocator is not None
      assert not self.op.nodes
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      # Lock the primary group used by the instance optimistically; this
      # requires going via the node before it's locked, requiring
      # verification later on
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)

    elif level == locking.LEVEL_NODE:
      # If an allocator is used, then we lock all the nodes in the current
      # instance group, as we don't know yet which ones will be selected;
      # if we replace the nodes without using an allocator, locks are
      # already declared in ExpandNames; otherwise, we need to lock all the
      # instance nodes for disk re-creation
      if self.op.iallocator:
        assert not self.op.nodes
        assert not self.needed_locks[locking.LEVEL_NODE]
        assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1

        # Lock member nodes of the group of the primary node
        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
          self.needed_locks[locking.LEVEL_NODE].extend(
            self.cfg.GetNodeGroup(group_uuid).members)

        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
      elif not self.op.nodes:
        self._LockInstancesNodes(primary_only=False)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    if self.op.node_uuids:
      if len(self.op.node_uuids) != len(instance.all_nodes):
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
                                   " %d replacement nodes were specified" %
                                   (instance.name, len(instance.all_nodes),
                                    len(self.op.node_uuids)),
                                   errors.ECODE_INVAL)
      assert instance.disk_template != constants.DT_DRBD8 or \
             len(self.op.node_uuids) == 2
      assert instance.disk_template != constants.DT_PLAIN or \
             len(self.op.node_uuids) == 1
      primary_node = self.op.node_uuids[0]
    else:
      primary_node = instance.primary_node
    if not self.op.iallocator:
      CheckNodeOnline(self, primary_node)

    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name, errors.ECODE_INVAL)

    # Verify if node group locks are still correct
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      # Node group locks are acquired only for the primary node (and only
      # when the allocator is used)
      CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
                              primary_only=True)

    # if we replace nodes *and* the old primary is offline, we don't
    # check the instance state
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
    if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
      CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                         msg="cannot recreate disks")

    if self.op.disks:
      self.disks = dict(self.op.disks)
    else:
      self.disks = dict((idx, {}) for idx in range(len(instance.disks)))

    maxidx = max(self.disks.keys())
    if maxidx >= len(instance.disks):
      raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
                                 errors.ECODE_INVAL)

    if ((self.op.node_uuids or self.op.iallocator) and
         sorted(self.disks.keys()) != range(len(instance.disks))):
      raise errors.OpPrereqError("Can't recreate disks partially and"
                                 " change the nodes at the same time",
                                 errors.ECODE_INVAL)

    self.instance = instance

    if self.op.iallocator:
      self._RunAllocator()
      # Release unneeded node and node resource locks
      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    if self.op.node_uuids:
      node_uuids = self.op.node_uuids
    else:
      node_uuids = instance.all_nodes
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
      )
    for new_params in self.disks.values():
      CheckSpindlesExclusiveStorage(new_params, excl_stor, False)

791 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
792 763ad5be Thomas Thrainer
    """Recreate the disks.
793 763ad5be Thomas Thrainer

794 763ad5be Thomas Thrainer
    """
795 763ad5be Thomas Thrainer
    assert (self.owned_locks(locking.LEVEL_NODE) ==
796 763ad5be Thomas Thrainer
            self.owned_locks(locking.LEVEL_NODE_RES))
797 763ad5be Thomas Thrainer
798 763ad5be Thomas Thrainer
    to_skip = []
799 763ad5be Thomas Thrainer
    mods = [] # keeps track of needed changes
800 763ad5be Thomas Thrainer
801 d0d7d7cf Thomas Thrainer
    for idx, disk in enumerate(self.instance.disks):
802 763ad5be Thomas Thrainer
      try:
803 763ad5be Thomas Thrainer
        changes = self.disks[idx]
804 763ad5be Thomas Thrainer
      except KeyError:
805 763ad5be Thomas Thrainer
        # Disk should not be recreated
806 763ad5be Thomas Thrainer
        to_skip.append(idx)
807 763ad5be Thomas Thrainer
        continue
808 763ad5be Thomas Thrainer
809 763ad5be Thomas Thrainer
      # update secondaries for disks, if needed
810 cd3b4ff4 Helga Velroyen
      if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
811 763ad5be Thomas Thrainer
        # need to update the nodes and minors
812 1c3231aa Thomas Thrainer
        assert len(self.op.node_uuids) == 2
813 763ad5be Thomas Thrainer
        assert len(disk.logical_id) == 6 # otherwise disk internals
814 763ad5be Thomas Thrainer
                                         # have changed
815 763ad5be Thomas Thrainer
        (_, _, old_port, _, _, old_secret) = disk.logical_id
816 1c3231aa Thomas Thrainer
        new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
817 da4a52a3 Thomas Thrainer
                                                self.instance.uuid)
818 1c3231aa Thomas Thrainer
        new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
819 763ad5be Thomas Thrainer
                  new_minors[0], new_minors[1], old_secret)
820 763ad5be Thomas Thrainer
        assert len(disk.logical_id) == len(new_id)
821 763ad5be Thomas Thrainer
      else:
822 763ad5be Thomas Thrainer
        new_id = None
823 763ad5be Thomas Thrainer
824 763ad5be Thomas Thrainer
      mods.append((idx, new_id, changes))
825 763ad5be Thomas Thrainer
826 763ad5be Thomas Thrainer
    # now that we have passed all asserts above, we can apply the mods
827 763ad5be Thomas Thrainer
    # in a single run (to avoid partial changes)
828 763ad5be Thomas Thrainer
    for idx, new_id, changes in mods:
829 d0d7d7cf Thomas Thrainer
      disk = self.instance.disks[idx]
830 763ad5be Thomas Thrainer
      if new_id is not None:
831 cd3b4ff4 Helga Velroyen
        assert disk.dev_type == constants.DT_DRBD8
832 763ad5be Thomas Thrainer
        disk.logical_id = new_id
833 763ad5be Thomas Thrainer
      if changes:
834 763ad5be Thomas Thrainer
        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
835 b54ecf12 Bernardo Dal Seno
                    mode=changes.get(constants.IDISK_MODE, None),
836 b54ecf12 Bernardo Dal Seno
                    spindles=changes.get(constants.IDISK_SPINDLES, None))
837 763ad5be Thomas Thrainer
838 763ad5be Thomas Thrainer
    # change primary node, if needed
839 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
840 d0d7d7cf Thomas Thrainer
      self.instance.primary_node = self.op.node_uuids[0]
841 763ad5be Thomas Thrainer
      self.LogWarning("Changing the instance's nodes, you will have to"
842 763ad5be Thomas Thrainer
                      " remove any disks left on the older nodes manually")
843 763ad5be Thomas Thrainer
844 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
845 d0d7d7cf Thomas Thrainer
      self.cfg.Update(self.instance, feedback_fn)
846 763ad5be Thomas Thrainer
847 763ad5be Thomas Thrainer
    # All touched nodes must be locked
848 763ad5be Thomas Thrainer
    mylocks = self.owned_locks(locking.LEVEL_NODE)
849 d0d7d7cf Thomas Thrainer
    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
850 d0d7d7cf Thomas Thrainer
    new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
851 a365b47f Bernardo Dal Seno
852 a365b47f Bernardo Dal Seno
    # TODO: Release node locks before wiping, or explain why it's not possible
853 a365b47f Bernardo Dal Seno
    if self.cfg.GetClusterInfo().prealloc_wipe_disks:
854 a365b47f Bernardo Dal Seno
      wipedisks = [(idx, disk, 0)
855 d0d7d7cf Thomas Thrainer
                   for (idx, disk) in enumerate(self.instance.disks)
856 a365b47f Bernardo Dal Seno
                   if idx not in to_skip]
857 d0d7d7cf Thomas Thrainer
      WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
858 d0d7d7cf Thomas Thrainer
                         cleanup=new_disks)
859 763ad5be Thomas Thrainer
860 763ad5be Thomas Thrainer
861 d90f0cb4 Helga Velroyen
def _PerformNodeInfoCall(lu, node_uuids, vg):
862 d90f0cb4 Helga Velroyen
  """Prepares the input and performs a node info call.
863 d90f0cb4 Helga Velroyen

864 d90f0cb4 Helga Velroyen
  @type lu: C{LogicalUnit}
865 d90f0cb4 Helga Velroyen
  @param lu: a logical unit from which we get configuration data
866 d90f0cb4 Helga Velroyen
  @type node_uuids: list of string
867 d90f0cb4 Helga Velroyen
  @param node_uuids: list of node UUIDs to perform the call for
868 d90f0cb4 Helga Velroyen
  @type vg: string
869 d90f0cb4 Helga Velroyen
  @param vg: the volume group's name
870 d90f0cb4 Helga Velroyen

871 d90f0cb4 Helga Velroyen
  """
872 d90f0cb4 Helga Velroyen
  lvm_storage_units = [(constants.ST_LVM_VG, vg)]
873 d90f0cb4 Helga Velroyen
  storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
874 d90f0cb4 Helga Velroyen
                                                  node_uuids)
875 d90f0cb4 Helga Velroyen
  hvname = lu.cfg.GetHypervisorType()
876 d90f0cb4 Helga Velroyen
  hvparams = lu.cfg.GetClusterInfo().hvparams
877 d90f0cb4 Helga Velroyen
  nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
878 d90f0cb4 Helga Velroyen
                                   [(hvname, hvparams[hvname])])
879 d90f0cb4 Helga Velroyen
  return nodeinfo
880 d90f0cb4 Helga Velroyen
881 d90f0cb4 Helga Velroyen
882 d90f0cb4 Helga Velroyen
def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
883 d90f0cb4 Helga Velroyen
  """Checks the vg capacity for a given node.
884 d90f0cb4 Helga Velroyen

885 d90f0cb4 Helga Velroyen
  @type node_info: tuple (_, list of dicts, _)
886 d90f0cb4 Helga Velroyen
  @param node_info: the result of the node info call for one node
887 d90f0cb4 Helga Velroyen
  @type node_name: string
888 d90f0cb4 Helga Velroyen
  @param node_name: the name of the node
889 d90f0cb4 Helga Velroyen
  @type vg: string
890 d90f0cb4 Helga Velroyen
  @param vg: volume group name
891 d90f0cb4 Helga Velroyen
  @type requested: int
892 d90f0cb4 Helga Velroyen
  @param requested: the amount of disk in MiB to check for
893 d90f0cb4 Helga Velroyen
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
894 d90f0cb4 Helga Velroyen
      or we cannot check the node
895 d90f0cb4 Helga Velroyen

896 d90f0cb4 Helga Velroyen
  """
897 d90f0cb4 Helga Velroyen
  (_, space_info, _) = node_info
898 d90f0cb4 Helga Velroyen
  lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
899 d90f0cb4 Helga Velroyen
      space_info, constants.ST_LVM_VG)
900 d90f0cb4 Helga Velroyen
  if not lvm_vg_info:
901 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Can't retrieve storage information for LVM")
902 d90f0cb4 Helga Velroyen
  vg_free = lvm_vg_info.get("storage_free", None)
903 d90f0cb4 Helga Velroyen
  if not isinstance(vg_free, int):
904 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Can't compute free disk space on node"
905 d90f0cb4 Helga Velroyen
                               " %s for vg %s, result was '%s'" %
906 d90f0cb4 Helga Velroyen
                               (node_name, vg, vg_free), errors.ECODE_ENVIRON)
907 d90f0cb4 Helga Velroyen
  if requested > vg_free:
908 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Not enough disk space on target node %s"
909 d90f0cb4 Helga Velroyen
                               " vg %s: required %d MiB, available %d MiB" %
910 d90f0cb4 Helga Velroyen
                               (node_name, vg, requested, vg_free),
911 d90f0cb4 Helga Velroyen
                               errors.ECODE_NORES)
912 d90f0cb4 Helga Velroyen
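# Illustrative numbers for _CheckVgCapacityForNode (not taken from any real
# cluster): if the node info payload reports "storage_free" as 1024 (MiB) for
# the volume group and the caller requested 2048 MiB, the check above raises
# OpPrereqError with errors.ECODE_NORES; a non-integer "storage_free" value
# raises errors.ECODE_ENVIRON instead.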
913 d90f0cb4 Helga Velroyen
914 1c3231aa Thomas Thrainer
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
915 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in the specified VG.
916 763ad5be Thomas Thrainer

917 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
918 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
919 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
920 763ad5be Thomas Thrainer
  exception.
921 763ad5be Thomas Thrainer

922 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
923 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
924 1c3231aa Thomas Thrainer
  @type node_uuids: C{list}
925 1c3231aa Thomas Thrainer
  @param node_uuids: the list of node UUIDs to check
926 763ad5be Thomas Thrainer
  @type vg: C{str}
927 763ad5be Thomas Thrainer
  @param vg: the volume group to check
928 763ad5be Thomas Thrainer
  @type requested: C{int}
929 763ad5be Thomas Thrainer
  @param requested: the amount of disk in MiB to check for
930 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
931 763ad5be Thomas Thrainer
      or we cannot check the node
932 763ad5be Thomas Thrainer

933 763ad5be Thomas Thrainer
  """
934 d90f0cb4 Helga Velroyen
  nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
935 f667baab Thomas Thrainer
  for node_uuid in node_uuids:
936 f667baab Thomas Thrainer
    node_name = lu.cfg.GetNodeName(node_uuid)
937 f667baab Thomas Thrainer
    info = nodeinfo[node_uuid]
938 1c3231aa Thomas Thrainer
    info.Raise("Cannot get current information from node %s" % node_name,
939 763ad5be Thomas Thrainer
               prereq=True, ecode=errors.ECODE_ENVIRON)
940 d90f0cb4 Helga Velroyen
    _CheckVgCapacityForNode(node_name, info.payload, vg, requested)
941 763ad5be Thomas Thrainer
942 763ad5be Thomas Thrainer
943 1c3231aa Thomas Thrainer
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
944 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in all the VGs.
945 763ad5be Thomas Thrainer

946 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
947 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
948 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
949 763ad5be Thomas Thrainer
  exception.
950 763ad5be Thomas Thrainer

951 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
952 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
953 1c3231aa Thomas Thrainer
  @type node_uuids: C{list}
954 1c3231aa Thomas Thrainer
  @param node_uuids: the list of node UUIDs to check
955 763ad5be Thomas Thrainer
  @type req_sizes: C{dict}
956 763ad5be Thomas Thrainer
  @param req_sizes: the hash of vg and corresponding amount of disk in
957 763ad5be Thomas Thrainer
      MiB to check for
958 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
959 763ad5be Thomas Thrainer
      or we cannot check the node
960 763ad5be Thomas Thrainer

961 763ad5be Thomas Thrainer
  """
962 763ad5be Thomas Thrainer
  for vg, req_size in req_sizes.items():
963 1c3231aa Thomas Thrainer
    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
964 763ad5be Thomas Thrainer
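# A minimal sketch of how CheckNodesFreeDiskPerVG is typically fed; the volume
# group names and sizes below are purely illustrative:
#
#   req_sizes = {"xenvg": 2048, "ssdvg": 512}   # MiB required per volume group
#   CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes)
#
# Each (vg, size) pair is checked independently via _CheckNodesFreeDiskOnVG.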
965 763ad5be Thomas Thrainer
966 763ad5be Thomas Thrainer
def _DiskSizeInBytesToMebibytes(lu, size):
967 763ad5be Thomas Thrainer
  """Converts a disk size in bytes to mebibytes.
968 763ad5be Thomas Thrainer

969 763ad5be Thomas Thrainer
  Warns and rounds up if the size isn't an even multiple of 1 MiB.
970 763ad5be Thomas Thrainer

971 763ad5be Thomas Thrainer
  """
972 763ad5be Thomas Thrainer
  (mib, remainder) = divmod(size, 1024 * 1024)
973 763ad5be Thomas Thrainer
974 763ad5be Thomas Thrainer
  if remainder != 0:
975 763ad5be Thomas Thrainer
    lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
976 763ad5be Thomas Thrainer
                  " to not overwrite existing data (%s bytes will not be"
977 763ad5be Thomas Thrainer
                  " wiped)", (1024 * 1024) - remainder)
978 763ad5be Thomas Thrainer
    mib += 1
979 763ad5be Thomas Thrainer
980 763ad5be Thomas Thrainer
  return mib
981 763ad5be Thomas Thrainer
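# Worked example for _DiskSizeInBytesToMebibytes (numbers are illustrative):
# for size = 10 * 1024 * 1024 + 512 bytes, divmod yields (10, 512); since the
# remainder is non-zero the result is rounded up to 11 MiB and a warning is
# logged that 1048064 bytes (1 MiB minus 512 bytes) will not be wiped.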
982 763ad5be Thomas Thrainer
983 763ad5be Thomas Thrainer
def _CalcEta(time_taken, written, total_size):
984 763ad5be Thomas Thrainer
  """Calculates the ETA based on size written and total size.
985 763ad5be Thomas Thrainer

986 763ad5be Thomas Thrainer
  @param time_taken: The time taken so far
987 763ad5be Thomas Thrainer
  @param written: amount written so far
988 763ad5be Thomas Thrainer
  @param total_size: The total size of data to be written
989 763ad5be Thomas Thrainer
  @return: The remaining time in seconds
990 763ad5be Thomas Thrainer

991 763ad5be Thomas Thrainer
  """
992 763ad5be Thomas Thrainer
  avg_time = time_taken / float(written)
993 763ad5be Thomas Thrainer
  return (total_size - written) * avg_time
994 763ad5be Thomas Thrainer
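# Worked example for _CalcEta (units are whatever the caller uses, typically
# MiB and seconds): with time_taken=600, written=30720 and total_size=102400,
# avg_time = 600 / 30720.0 = 0.01953125 seconds per unit, so the estimate is
# (102400 - 30720) * 0.01953125 = 1400 seconds remaining.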
995 763ad5be Thomas Thrainer
996 5eacbcae Thomas Thrainer
def WipeDisks(lu, instance, disks=None):
997 763ad5be Thomas Thrainer
  """Wipes instance disks.
998 763ad5be Thomas Thrainer

999 763ad5be Thomas Thrainer
  @type lu: L{LogicalUnit}
1000 763ad5be Thomas Thrainer
  @param lu: the logical unit on whose behalf we execute
1001 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
1002 763ad5be Thomas Thrainer
  @param instance: the instance whose disks we should wipe
1003 763ad5be Thomas Thrainer
  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
1004 763ad5be Thomas Thrainer
  @param disks: Disk details; tuple contains disk index, disk object and the
1005 763ad5be Thomas Thrainer
    start offset
1006 763ad5be Thomas Thrainer

1007 763ad5be Thomas Thrainer
  """
1008 1c3231aa Thomas Thrainer
  node_uuid = instance.primary_node
1009 1c3231aa Thomas Thrainer
  node_name = lu.cfg.GetNodeName(node_uuid)
1010 763ad5be Thomas Thrainer
1011 763ad5be Thomas Thrainer
  if disks is None:
1012 763ad5be Thomas Thrainer
    disks = [(idx, disk, 0)
1013 763ad5be Thomas Thrainer
             for (idx, disk) in enumerate(instance.disks)]
1014 763ad5be Thomas Thrainer
1015 763ad5be Thomas Thrainer
  logging.info("Pausing synchronization of disks of instance '%s'",
1016 763ad5be Thomas Thrainer
               instance.name)
1017 1c3231aa Thomas Thrainer
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
1018 763ad5be Thomas Thrainer
                                                  (map(compat.snd, disks),
1019 763ad5be Thomas Thrainer
                                                   instance),
1020 763ad5be Thomas Thrainer
                                                  True)
1021 1c3231aa Thomas Thrainer
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
1022 763ad5be Thomas Thrainer
1023 763ad5be Thomas Thrainer
  for idx, success in enumerate(result.payload):
1024 763ad5be Thomas Thrainer
    if not success:
1025 763ad5be Thomas Thrainer
      logging.warn("Pausing synchronization of disk %s of instance '%s'"
1026 763ad5be Thomas Thrainer
                   " failed", idx, instance.name)
1027 763ad5be Thomas Thrainer
1028 763ad5be Thomas Thrainer
  try:
1029 763ad5be Thomas Thrainer
    for (idx, device, offset) in disks:
1030 763ad5be Thomas Thrainer
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
1031 763ad5be Thomas Thrainer
      # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
1032 763ad5be Thomas Thrainer
      wipe_chunk_size = \
1033 763ad5be Thomas Thrainer
        int(min(constants.MAX_WIPE_CHUNK,
1034 763ad5be Thomas Thrainer
                device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))
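      # Example (assuming, for illustration, constants.MAX_WIPE_CHUNK = 1024
      # MiB and constants.MIN_WIPE_CHUNK_PERCENT = 10): a 5120 MiB disk is
      # wiped in min(1024, 512) = 512 MiB chunks, while a 20480 MiB disk is
      # capped at 1024 MiB chunks.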
1035 763ad5be Thomas Thrainer
1036 763ad5be Thomas Thrainer
      size = device.size
1037 763ad5be Thomas Thrainer
      last_output = 0
1038 763ad5be Thomas Thrainer
      start_time = time.time()
1039 763ad5be Thomas Thrainer
1040 763ad5be Thomas Thrainer
      if offset == 0:
1041 763ad5be Thomas Thrainer
        info_text = ""
1042 763ad5be Thomas Thrainer
      else:
1043 763ad5be Thomas Thrainer
        info_text = (" (from %s to %s)" %
1044 763ad5be Thomas Thrainer
                     (utils.FormatUnit(offset, "h"),
1045 763ad5be Thomas Thrainer
                      utils.FormatUnit(size, "h")))
1046 763ad5be Thomas Thrainer
1047 763ad5be Thomas Thrainer
      lu.LogInfo("* Wiping disk %s%s", idx, info_text)
1048 763ad5be Thomas Thrainer
1049 763ad5be Thomas Thrainer
      logging.info("Wiping disk %d for instance %s on node %s using"
1050 1c3231aa Thomas Thrainer
                   " chunk size %s", idx, instance.name, node_name,
1051 1c3231aa Thomas Thrainer
                   wipe_chunk_size)
1052 763ad5be Thomas Thrainer
1053 763ad5be Thomas Thrainer
      while offset < size:
1054 763ad5be Thomas Thrainer
        wipe_size = min(wipe_chunk_size, size - offset)
1055 763ad5be Thomas Thrainer
1056 763ad5be Thomas Thrainer
        logging.debug("Wiping disk %d, offset %s, chunk %s",
1057 763ad5be Thomas Thrainer
                      idx, offset, wipe_size)
1058 763ad5be Thomas Thrainer
1059 1c3231aa Thomas Thrainer
        result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
1060 1c3231aa Thomas Thrainer
                                           offset, wipe_size)
1061 763ad5be Thomas Thrainer
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
1062 763ad5be Thomas Thrainer
                     (idx, offset, wipe_size))
1063 763ad5be Thomas Thrainer
1064 763ad5be Thomas Thrainer
        now = time.time()
1065 763ad5be Thomas Thrainer
        offset += wipe_size
1066 763ad5be Thomas Thrainer
        if now - last_output >= 60:
1067 763ad5be Thomas Thrainer
          eta = _CalcEta(now - start_time, offset, size)
1068 763ad5be Thomas Thrainer
          lu.LogInfo(" - done: %.1f%% ETA: %s",
1069 763ad5be Thomas Thrainer
                     offset / float(size) * 100, utils.FormatSeconds(eta))
1070 763ad5be Thomas Thrainer
          last_output = now
1071 763ad5be Thomas Thrainer
  finally:
1072 763ad5be Thomas Thrainer
    logging.info("Resuming synchronization of disks for instance '%s'",
1073 763ad5be Thomas Thrainer
                 instance.name)
1074 763ad5be Thomas Thrainer
1075 1c3231aa Thomas Thrainer
    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
1076 763ad5be Thomas Thrainer
                                                    (map(compat.snd, disks),
1077 763ad5be Thomas Thrainer
                                                     instance),
1078 763ad5be Thomas Thrainer
                                                    False)
1079 763ad5be Thomas Thrainer
1080 763ad5be Thomas Thrainer
    if result.fail_msg:
1081 763ad5be Thomas Thrainer
      lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
1082 1c3231aa Thomas Thrainer
                    node_name, result.fail_msg)
1083 763ad5be Thomas Thrainer
    else:
1084 763ad5be Thomas Thrainer
      for idx, success in enumerate(result.payload):
1085 763ad5be Thomas Thrainer
        if not success:
1086 763ad5be Thomas Thrainer
          lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
1087 763ad5be Thomas Thrainer
                        " failed", idx, instance.name)
1088 763ad5be Thomas Thrainer
1089 763ad5be Thomas Thrainer
1090 a365b47f Bernardo Dal Seno
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
1091 a365b47f Bernardo Dal Seno
  """Wrapper for L{WipeDisks} that handles errors.
1092 a365b47f Bernardo Dal Seno

1093 a365b47f Bernardo Dal Seno
  @type lu: L{LogicalUnit}
1094 a365b47f Bernardo Dal Seno
  @param lu: the logical unit on whose behalf we execute
1095 a365b47f Bernardo Dal Seno
  @type instance: L{objects.Instance}
1096 a365b47f Bernardo Dal Seno
  @param instance: the instance whose disks we should wipe
1097 a365b47f Bernardo Dal Seno
  @param disks: see L{WipeDisks}
1098 a365b47f Bernardo Dal Seno
  @param cleanup: the result returned by L{CreateDisks}, used for cleanup in
1099 a365b47f Bernardo Dal Seno
      case of error
1100 a365b47f Bernardo Dal Seno
  @raise errors.OpPrereqError: in case of failure
1101 a365b47f Bernardo Dal Seno

1102 a365b47f Bernardo Dal Seno
  """
1103 a365b47f Bernardo Dal Seno
  try:
1104 a365b47f Bernardo Dal Seno
    WipeDisks(lu, instance, disks=disks)
1105 a365b47f Bernardo Dal Seno
  except errors.OpExecError:
1106 a365b47f Bernardo Dal Seno
    logging.warning("Wiping disks for instance '%s' failed",
1107 a365b47f Bernardo Dal Seno
                    instance.name)
1108 0c3d9c7c Thomas Thrainer
    _UndoCreateDisks(lu, cleanup, instance)
1109 a365b47f Bernardo Dal Seno
    raise
1110 a365b47f Bernardo Dal Seno
1111 a365b47f Bernardo Dal Seno
1112 5eacbcae Thomas Thrainer
def ExpandCheckDisks(instance, disks):
1113 763ad5be Thomas Thrainer
  """Return the instance disks selected by the disks list
1114 763ad5be Thomas Thrainer

1115 763ad5be Thomas Thrainer
  @type disks: list of L{objects.Disk} or None
1116 763ad5be Thomas Thrainer
  @param disks: selected disks
1117 763ad5be Thomas Thrainer
  @rtype: list of L{objects.Disk}
1118 763ad5be Thomas Thrainer
  @return: selected instance disks to act on
1119 763ad5be Thomas Thrainer

1120 763ad5be Thomas Thrainer
  """
1121 763ad5be Thomas Thrainer
  if disks is None:
1122 763ad5be Thomas Thrainer
    return instance.disks
1123 763ad5be Thomas Thrainer
  else:
1124 763ad5be Thomas Thrainer
    if not set(disks).issubset(instance.disks):
1125 763ad5be Thomas Thrainer
      raise errors.ProgrammerError("Can only act on disks belonging to the"
1126 328201a5 Guido Trotter
                                   " target instance: expected a subset of %r,"
1127 328201a5 Guido Trotter
                                   " got %r" % (instance.disks, disks))
1128 763ad5be Thomas Thrainer
    return disks
1129 763ad5be Thomas Thrainer
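# Usage sketch for ExpandCheckDisks: passing disks=None selects every disk of
# the instance, passing a subset of instance.disks returns that subset, and
# passing any disk object that does not belong to the instance raises
# ProgrammerError.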
1130 763ad5be Thomas Thrainer
1131 5eacbcae Thomas Thrainer
def WaitForSync(lu, instance, disks=None, oneshot=False):
1132 763ad5be Thomas Thrainer
  """Sleep and poll for an instance's disk to sync.
1133 763ad5be Thomas Thrainer

1134 763ad5be Thomas Thrainer
  """
1135 763ad5be Thomas Thrainer
  if not instance.disks or (disks is not None and not disks):
1136 763ad5be Thomas Thrainer
    return True
1137 763ad5be Thomas Thrainer
1138 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1139 763ad5be Thomas Thrainer
1140 763ad5be Thomas Thrainer
  if not oneshot:
1141 763ad5be Thomas Thrainer
    lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
1142 763ad5be Thomas Thrainer
1143 1c3231aa Thomas Thrainer
  node_uuid = instance.primary_node
1144 1c3231aa Thomas Thrainer
  node_name = lu.cfg.GetNodeName(node_uuid)
1145 763ad5be Thomas Thrainer
1146 763ad5be Thomas Thrainer
  # TODO: Convert to utils.Retry
1147 763ad5be Thomas Thrainer
1148 763ad5be Thomas Thrainer
  retries = 0
1149 763ad5be Thomas Thrainer
  degr_retries = 10 # in seconds, as we sleep 1 second each time
1150 763ad5be Thomas Thrainer
  while True:
1151 763ad5be Thomas Thrainer
    max_time = 0
1152 763ad5be Thomas Thrainer
    done = True
1153 763ad5be Thomas Thrainer
    cumul_degraded = False
1154 1c3231aa Thomas Thrainer
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
1155 763ad5be Thomas Thrainer
    msg = rstats.fail_msg
1156 763ad5be Thomas Thrainer
    if msg:
1157 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
1158 763ad5be Thomas Thrainer
      retries += 1
1159 763ad5be Thomas Thrainer
      if retries >= 10:
1160 763ad5be Thomas Thrainer
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1161 1c3231aa Thomas Thrainer
                                 " aborting." % node_name)
1162 763ad5be Thomas Thrainer
      time.sleep(6)
1163 763ad5be Thomas Thrainer
      continue
1164 763ad5be Thomas Thrainer
    rstats = rstats.payload
1165 763ad5be Thomas Thrainer
    retries = 0
1166 763ad5be Thomas Thrainer
    for i, mstat in enumerate(rstats):
1167 763ad5be Thomas Thrainer
      if mstat is None:
1168 763ad5be Thomas Thrainer
        lu.LogWarning("Can't compute data for node %s/%s",
1169 1c3231aa Thomas Thrainer
                      node_name, disks[i].iv_name)
1170 763ad5be Thomas Thrainer
        continue
1171 763ad5be Thomas Thrainer
1172 763ad5be Thomas Thrainer
      cumul_degraded = (cumul_degraded or
1173 763ad5be Thomas Thrainer
                        (mstat.is_degraded and mstat.sync_percent is None))
1174 763ad5be Thomas Thrainer
      if mstat.sync_percent is not None:
1175 763ad5be Thomas Thrainer
        done = False
1176 763ad5be Thomas Thrainer
        if mstat.estimated_time is not None:
1177 763ad5be Thomas Thrainer
          rem_time = ("%s remaining (estimated)" %
1178 763ad5be Thomas Thrainer
                      utils.FormatSeconds(mstat.estimated_time))
1179 763ad5be Thomas Thrainer
          max_time = mstat.estimated_time
1180 763ad5be Thomas Thrainer
        else:
1181 763ad5be Thomas Thrainer
          rem_time = "no time estimate"
1182 763ad5be Thomas Thrainer
        lu.LogInfo("- device %s: %5.2f%% done, %s",
1183 763ad5be Thomas Thrainer
                   disks[i].iv_name, mstat.sync_percent, rem_time)
1184 763ad5be Thomas Thrainer
1185 763ad5be Thomas Thrainer
    # if we're done but degraded, let's do a few small retries, to
1186 763ad5be Thomas Thrainer
    # make sure we see a stable and not transient situation; therefore
1187 763ad5be Thomas Thrainer
    # we force restart of the loop
1188 763ad5be Thomas Thrainer
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
1189 763ad5be Thomas Thrainer
      logging.info("Degraded disks found, %d retries left", degr_retries)
1190 763ad5be Thomas Thrainer
      degr_retries -= 1
1191 763ad5be Thomas Thrainer
      time.sleep(1)
1192 763ad5be Thomas Thrainer
      continue
1193 763ad5be Thomas Thrainer
1194 763ad5be Thomas Thrainer
    if done or oneshot:
1195 763ad5be Thomas Thrainer
      break
1196 763ad5be Thomas Thrainer
1197 763ad5be Thomas Thrainer
    time.sleep(min(60, max_time))
1198 763ad5be Thomas Thrainer
1199 763ad5be Thomas Thrainer
  if done:
1200 763ad5be Thomas Thrainer
    lu.LogInfo("Instance %s's disks are in sync", instance.name)
1201 763ad5be Thomas Thrainer
1202 763ad5be Thomas Thrainer
  return not cumul_degraded
1203 763ad5be Thomas Thrainer
1204 763ad5be Thomas Thrainer
1205 5eacbcae Thomas Thrainer
def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
1206 763ad5be Thomas Thrainer
  """Shutdown block devices of an instance.
1207 763ad5be Thomas Thrainer

1208 763ad5be Thomas Thrainer
  This does the shutdown on all nodes of the instance.
1209 763ad5be Thomas Thrainer

1210 763ad5be Thomas Thrainer
  If ignore_primary is false, errors on the primary node are
1211 763ad5be Thomas Thrainer
  treated as failures; they are ignored only when it is true.
1212 763ad5be Thomas Thrainer

1213 763ad5be Thomas Thrainer
  """
1214 da4a52a3 Thomas Thrainer
  lu.cfg.MarkInstanceDisksInactive(instance.uuid)
1215 763ad5be Thomas Thrainer
  all_result = True
1216 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1217 763ad5be Thomas Thrainer
1218 763ad5be Thomas Thrainer
  for disk in disks:
1219 1c3231aa Thomas Thrainer
    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
1220 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
1221 763ad5be Thomas Thrainer
      msg = result.fail_msg
1222 763ad5be Thomas Thrainer
      if msg:
1223 763ad5be Thomas Thrainer
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
1224 1c3231aa Thomas Thrainer
                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1225 1c3231aa Thomas Thrainer
        if ((node_uuid == instance.primary_node and not ignore_primary) or
1226 1c3231aa Thomas Thrainer
            (node_uuid != instance.primary_node and not result.offline)):
1227 763ad5be Thomas Thrainer
          all_result = False
1228 763ad5be Thomas Thrainer
  return all_result
1229 763ad5be Thomas Thrainer
1230 763ad5be Thomas Thrainer
1231 763ad5be Thomas Thrainer
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
1232 763ad5be Thomas Thrainer
  """Shutdown block devices of an instance.
1233 763ad5be Thomas Thrainer

1234 763ad5be Thomas Thrainer
  This function checks that the instance is down before calling
1235 763ad5be Thomas Thrainer
  ShutdownInstanceDisks.
1236 763ad5be Thomas Thrainer

1237 763ad5be Thomas Thrainer
  """
1238 5eacbcae Thomas Thrainer
  CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
1239 5eacbcae Thomas Thrainer
  ShutdownInstanceDisks(lu, instance, disks=disks)
1240 763ad5be Thomas Thrainer
1241 763ad5be Thomas Thrainer
1242 5eacbcae Thomas Thrainer
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
1243 763ad5be Thomas Thrainer
                           ignore_size=False):
1244 763ad5be Thomas Thrainer
  """Prepare the block devices for an instance.
1245 763ad5be Thomas Thrainer

1246 763ad5be Thomas Thrainer
  This sets up the block devices on all nodes.
1247 763ad5be Thomas Thrainer

1248 763ad5be Thomas Thrainer
  @type lu: L{LogicalUnit}
1249 763ad5be Thomas Thrainer
  @param lu: the logical unit on whose behalf we execute
1250 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
1251 763ad5be Thomas Thrainer
  @param instance: the instance whose disks we assemble
1252 763ad5be Thomas Thrainer
  @type disks: list of L{objects.Disk} or None
1253 763ad5be Thomas Thrainer
  @param disks: which disks to assemble (or all, if None)
1254 763ad5be Thomas Thrainer
  @type ignore_secondaries: boolean
1255 763ad5be Thomas Thrainer
  @param ignore_secondaries: if true, errors on secondary nodes
1256 763ad5be Thomas Thrainer
      won't result in an error return from the function
1257 763ad5be Thomas Thrainer
  @type ignore_size: boolean
1258 763ad5be Thomas Thrainer
  @param ignore_size: if true, the current known size of the disk
1259 763ad5be Thomas Thrainer
      will not be used during the disk activation, useful for cases
1260 763ad5be Thomas Thrainer
      when the size is wrong
1261 763ad5be Thomas Thrainer
  @return: a tuple of (disks_ok, device_info); device_info is a list of
1262 763ad5be Thomas Thrainer
      (host, instance_visible_name, node_visible_name) tuples
1263 763ad5be Thomas Thrainer
      with the mapping from node devices to instance devices
1264 763ad5be Thomas Thrainer

1265 763ad5be Thomas Thrainer
  """
1266 763ad5be Thomas Thrainer
  device_info = []
1267 763ad5be Thomas Thrainer
  disks_ok = True
1268 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1269 763ad5be Thomas Thrainer
1270 763ad5be Thomas Thrainer
  # With the two-pass mechanism we try to reduce the window of
1271 763ad5be Thomas Thrainer
  # opportunity for the race condition of switching DRBD to primary
1272 763ad5be Thomas Thrainer
  # before handshaking occurred, but we do not eliminate it
1273 763ad5be Thomas Thrainer
1274 763ad5be Thomas Thrainer
  # The proper fix would be to wait (with some limits) until the
1275 763ad5be Thomas Thrainer
  # connection has been made and drbd transitions from WFConnection
1276 763ad5be Thomas Thrainer
  # into any other network-connected state (Connected, SyncTarget,
1277 763ad5be Thomas Thrainer
  # SyncSource, etc.)
1278 763ad5be Thomas Thrainer
1279 1d4a4b26 Thomas Thrainer
  # mark instance disks as active before doing actual work, so watcher does
1280 1d4a4b26 Thomas Thrainer
  # not try to shut them down erroneously
1281 da4a52a3 Thomas Thrainer
  lu.cfg.MarkInstanceDisksActive(instance.uuid)
1282 1d4a4b26 Thomas Thrainer
1283 763ad5be Thomas Thrainer
  # 1st pass, assemble on all nodes in secondary mode
1284 763ad5be Thomas Thrainer
  for idx, inst_disk in enumerate(disks):
1285 1c3231aa Thomas Thrainer
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
1286 1c3231aa Thomas Thrainer
                                  instance.primary_node):
1287 763ad5be Thomas Thrainer
      if ignore_size:
1288 763ad5be Thomas Thrainer
        node_disk = node_disk.Copy()
1289 763ad5be Thomas Thrainer
        node_disk.UnsetSize()
1290 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
1291 da4a52a3 Thomas Thrainer
                                             instance.name, False, idx)
1292 763ad5be Thomas Thrainer
      msg = result.fail_msg
1293 763ad5be Thomas Thrainer
      if msg:
1294 1c3231aa Thomas Thrainer
        is_offline_secondary = (node_uuid in instance.secondary_nodes and
1295 763ad5be Thomas Thrainer
                                result.offline)
1296 763ad5be Thomas Thrainer
        lu.LogWarning("Could not prepare block device %s on node %s"
1297 763ad5be Thomas Thrainer
                      " (is_primary=False, pass=1): %s",
1298 1c3231aa Thomas Thrainer
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1299 763ad5be Thomas Thrainer
        if not (ignore_secondaries or is_offline_secondary):
1300 763ad5be Thomas Thrainer
          disks_ok = False
1301 763ad5be Thomas Thrainer
1302 763ad5be Thomas Thrainer
  # FIXME: race condition on drbd migration to primary
1303 763ad5be Thomas Thrainer
1304 763ad5be Thomas Thrainer
  # 2nd pass, do only the primary node
1305 763ad5be Thomas Thrainer
  for idx, inst_disk in enumerate(disks):
1306 763ad5be Thomas Thrainer
    dev_path = None
1307 763ad5be Thomas Thrainer
1308 1c3231aa Thomas Thrainer
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
1309 1c3231aa Thomas Thrainer
                                  instance.primary_node):
1310 1c3231aa Thomas Thrainer
      if node_uuid != instance.primary_node:
1311 763ad5be Thomas Thrainer
        continue
1312 763ad5be Thomas Thrainer
      if ignore_size:
1313 763ad5be Thomas Thrainer
        node_disk = node_disk.Copy()
1314 763ad5be Thomas Thrainer
        node_disk.UnsetSize()
1315 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
1316 da4a52a3 Thomas Thrainer
                                             instance.name, True, idx)
1317 763ad5be Thomas Thrainer
      msg = result.fail_msg
1318 763ad5be Thomas Thrainer
      if msg:
1319 763ad5be Thomas Thrainer
        lu.LogWarning("Could not prepare block device %s on node %s"
1320 763ad5be Thomas Thrainer
                      " (is_primary=True, pass=2): %s",
1321 1c3231aa Thomas Thrainer
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1322 763ad5be Thomas Thrainer
        disks_ok = False
1323 763ad5be Thomas Thrainer
      else:
1324 763ad5be Thomas Thrainer
        dev_path = result.payload
1325 763ad5be Thomas Thrainer
1326 1c3231aa Thomas Thrainer
    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
1327 1c3231aa Thomas Thrainer
                        inst_disk.iv_name, dev_path))
1328 763ad5be Thomas Thrainer
1329 1d4a4b26 Thomas Thrainer
  if not disks_ok:
1330 da4a52a3 Thomas Thrainer
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)
1331 1d4a4b26 Thomas Thrainer
1332 763ad5be Thomas Thrainer
  return disks_ok, device_info
1333 763ad5be Thomas Thrainer
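# Shape of the value returned by AssembleInstanceDisks on success (the node
# name, iv_names and device paths below are purely illustrative):
#
#   (True, [("node1.example.com", "disk/0", "/dev/drbd0"),
#           ("node1.example.com", "disk/1", "/dev/drbd1")])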
1334 763ad5be Thomas Thrainer
1335 5eacbcae Thomas Thrainer
def StartInstanceDisks(lu, instance, force):
1336 763ad5be Thomas Thrainer
  """Start the disks of an instance.
1337 763ad5be Thomas Thrainer

1338 763ad5be Thomas Thrainer
  """
1339 5eacbcae Thomas Thrainer
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
1340 5eacbcae Thomas Thrainer
                                      ignore_secondaries=force)
1341 763ad5be Thomas Thrainer
  if not disks_ok:
1342 5eacbcae Thomas Thrainer
    ShutdownInstanceDisks(lu, instance)
1343 763ad5be Thomas Thrainer
    if force is not None and not force:
1344 763ad5be Thomas Thrainer
      lu.LogWarning("",
1345 763ad5be Thomas Thrainer
                    hint=("If the message above refers to a secondary node,"
1346 763ad5be Thomas Thrainer
                          " you can retry the operation using '--force'"))
1347 763ad5be Thomas Thrainer
    raise errors.OpExecError("Disk consistency error")
1348 763ad5be Thomas Thrainer
1349 763ad5be Thomas Thrainer
1350 763ad5be Thomas Thrainer
class LUInstanceGrowDisk(LogicalUnit):
1351 763ad5be Thomas Thrainer
  """Grow a disk of an instance.
1352 763ad5be Thomas Thrainer

1353 763ad5be Thomas Thrainer
  """
1354 763ad5be Thomas Thrainer
  HPATH = "disk-grow"
1355 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
1356 763ad5be Thomas Thrainer
  REQ_BGL = False
1357 763ad5be Thomas Thrainer
1358 763ad5be Thomas Thrainer
  def ExpandNames(self):
1359 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1360 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1361 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1362 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1363 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
1364 763ad5be Thomas Thrainer
1365 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1366 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1367 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1368 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
1369 763ad5be Thomas Thrainer
      # Copy node locks
1370 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1371 5eacbcae Thomas Thrainer
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1372 763ad5be Thomas Thrainer
1373 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
1374 763ad5be Thomas Thrainer
    """Build hooks env.
1375 763ad5be Thomas Thrainer

1376 763ad5be Thomas Thrainer
    This runs on the master, the primary and all the secondaries.
1377 763ad5be Thomas Thrainer

1378 763ad5be Thomas Thrainer
    """
1379 763ad5be Thomas Thrainer
    env = {
1380 763ad5be Thomas Thrainer
      "DISK": self.op.disk,
1381 763ad5be Thomas Thrainer
      "AMOUNT": self.op.amount,
1382 763ad5be Thomas Thrainer
      "ABSOLUTE": self.op.absolute,
1383 763ad5be Thomas Thrainer
      }
1384 5eacbcae Thomas Thrainer
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1385 763ad5be Thomas Thrainer
    return env
1386 763ad5be Thomas Thrainer
1387 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
1388 763ad5be Thomas Thrainer
    """Build hooks nodes.
1389 763ad5be Thomas Thrainer

1390 763ad5be Thomas Thrainer
    """
1391 763ad5be Thomas Thrainer
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1392 763ad5be Thomas Thrainer
    return (nl, nl)
1393 763ad5be Thomas Thrainer
1394 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1395 763ad5be Thomas Thrainer
    """Check prerequisites.
1396 763ad5be Thomas Thrainer

1397 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1398 763ad5be Thomas Thrainer

1399 763ad5be Thomas Thrainer
    """
1400 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1401 da4a52a3 Thomas Thrainer
    assert self.instance is not None, \
1402 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1403 da4a52a3 Thomas Thrainer
    node_uuids = list(self.instance.all_nodes)
1404 1c3231aa Thomas Thrainer
    for node_uuid in node_uuids:
1405 1c3231aa Thomas Thrainer
      CheckNodeOnline(self, node_uuid)
1406 e43a624e Bernardo Dal Seno
    self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
1407 763ad5be Thomas Thrainer
1408 da4a52a3 Thomas Thrainer
    if self.instance.disk_template not in constants.DTS_GROWABLE:
1409 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Instance's disk layout does not support"
1410 763ad5be Thomas Thrainer
                                 " growing", errors.ECODE_INVAL)
1411 763ad5be Thomas Thrainer
1412 da4a52a3 Thomas Thrainer
    self.disk = self.instance.FindDisk(self.op.disk)
1413 763ad5be Thomas Thrainer
1414 763ad5be Thomas Thrainer
    if self.op.absolute:
1415 763ad5be Thomas Thrainer
      self.target = self.op.amount
1416 763ad5be Thomas Thrainer
      self.delta = self.target - self.disk.size
1417 763ad5be Thomas Thrainer
      if self.delta < 0:
1418 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Requested size (%s) is smaller than "
1419 763ad5be Thomas Thrainer
                                   "current disk size (%s)" %
1420 763ad5be Thomas Thrainer
                                   (utils.FormatUnit(self.target, "h"),
1421 763ad5be Thomas Thrainer
                                    utils.FormatUnit(self.disk.size, "h")),
1422 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
1423 763ad5be Thomas Thrainer
    else:
1424 763ad5be Thomas Thrainer
      self.delta = self.op.amount
1425 763ad5be Thomas Thrainer
      self.target = self.disk.size + self.delta
1426 763ad5be Thomas Thrainer
      if self.delta < 0:
1427 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Requested increment (%s) is negative" %
1428 763ad5be Thomas Thrainer
                                   utils.FormatUnit(self.delta, "h"),
1429 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
1430 763ad5be Thomas Thrainer
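    # Worked example of the arithmetic above (sizes are illustrative MiB
    # values): for a 10240 MiB disk, amount=2048 with absolute=False gives
    # delta=2048 and target=12288, while amount=8192 with absolute=True gives
    # delta=-2048 and is rejected because shrinking is not supported here.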
1431 1c3231aa Thomas Thrainer
    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))
1432 763ad5be Thomas Thrainer
1433 1c3231aa Thomas Thrainer
  def _CheckDiskSpace(self, node_uuids, req_vgspace):
1434 763ad5be Thomas Thrainer
    template = self.instance.disk_template
1435 8e5a911a Bernardo Dal Seno
    if (template not in constants.DTS_NO_FREE_SPACE_CHECK and
1436 8e5a911a Bernardo Dal Seno
        not any(self.node_es_flags.values())):
1437 763ad5be Thomas Thrainer
      # TODO: check the free disk space for file, when that feature will be
1438 763ad5be Thomas Thrainer
      # supported
1439 8e5a911a Bernardo Dal Seno
      # With exclusive storage we need to do something smarter than just
1440 8e5a911a Bernardo Dal Seno
      # looking at free space, which, in the end, is basically a dry run. So
1441 8e5a911a Bernardo Dal Seno
      # we rely on the dry run performed in Exec() instead.
1442 1c3231aa Thomas Thrainer
      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
1443 763ad5be Thomas Thrainer
1444 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1445 763ad5be Thomas Thrainer
    """Execute disk grow.
1446 763ad5be Thomas Thrainer

1447 763ad5be Thomas Thrainer
    """
1448 d0d7d7cf Thomas Thrainer
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
1449 763ad5be Thomas Thrainer
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1450 763ad5be Thomas Thrainer
            self.owned_locks(locking.LEVEL_NODE_RES))
1451 763ad5be Thomas Thrainer
1452 763ad5be Thomas Thrainer
    wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
1453 763ad5be Thomas Thrainer
1454 d0d7d7cf Thomas Thrainer
    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
1455 763ad5be Thomas Thrainer
    if not disks_ok:
1456 763ad5be Thomas Thrainer
      raise errors.OpExecError("Cannot activate block device to grow")
1457 763ad5be Thomas Thrainer
1458 763ad5be Thomas Thrainer
    feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
1459 d0d7d7cf Thomas Thrainer
                (self.op.disk, self.instance.name,
1460 763ad5be Thomas Thrainer
                 utils.FormatUnit(self.delta, "h"),
1461 763ad5be Thomas Thrainer
                 utils.FormatUnit(self.target, "h")))
1462 763ad5be Thomas Thrainer
1463 763ad5be Thomas Thrainer
    # First run all grow ops in dry-run mode
1464 d0d7d7cf Thomas Thrainer
    for node_uuid in self.instance.all_nodes:
1465 d0d7d7cf Thomas Thrainer
      result = self.rpc.call_blockdev_grow(node_uuid,
1466 d0d7d7cf Thomas Thrainer
                                           (self.disk, self.instance),
1467 e43a624e Bernardo Dal Seno
                                           self.delta, True, True,
1468 e43a624e Bernardo Dal Seno
                                           self.node_es_flags[node_uuid])
1469 1c3231aa Thomas Thrainer
      result.Raise("Dry-run grow request failed to node %s" %
1470 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(node_uuid))
1471 763ad5be Thomas Thrainer
1472 763ad5be Thomas Thrainer
    if wipe_disks:
1473 763ad5be Thomas Thrainer
      # Get disk size from primary node for wiping
1474 0c3d9c7c Thomas Thrainer
      result = self.rpc.call_blockdev_getdimensions(
1475 d66acf3d Thomas Thrainer
                 self.instance.primary_node, [([self.disk], self.instance)])
1476 763ad5be Thomas Thrainer
      result.Raise("Failed to retrieve disk size from node '%s'" %
1477 d0d7d7cf Thomas Thrainer
                   self.instance.primary_node)
1478 763ad5be Thomas Thrainer
1479 6ef8077e Bernardo Dal Seno
      (disk_dimensions, ) = result.payload
1480 763ad5be Thomas Thrainer
1481 6ef8077e Bernardo Dal Seno
      if disk_dimensions is None:
1482 763ad5be Thomas Thrainer
        raise errors.OpExecError("Failed to retrieve disk size from primary"
1483 d0d7d7cf Thomas Thrainer
                                 " node '%s'" % self.instance.primary_node)
1484 6ef8077e Bernardo Dal Seno
      (disk_size_in_bytes, _) = disk_dimensions
1485 763ad5be Thomas Thrainer
1486 763ad5be Thomas Thrainer
      old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)
1487 763ad5be Thomas Thrainer
1488 d0d7d7cf Thomas Thrainer
      assert old_disk_size >= self.disk.size, \
1489 763ad5be Thomas Thrainer
        ("Retrieved disk size too small (got %s, should be at least %s)" %
1490 d0d7d7cf Thomas Thrainer
         (old_disk_size, self.disk.size))
1491 763ad5be Thomas Thrainer
    else:
1492 763ad5be Thomas Thrainer
      old_disk_size = None
1493 763ad5be Thomas Thrainer
1494 763ad5be Thomas Thrainer
    # We know that (as far as we can test) operations across different
1495 763ad5be Thomas Thrainer
    # nodes will succeed; time to run it for real on the backing storage
1496 d0d7d7cf Thomas Thrainer
    for node_uuid in self.instance.all_nodes:
1497 d0d7d7cf Thomas Thrainer
      result = self.rpc.call_blockdev_grow(node_uuid,
1498 d0d7d7cf Thomas Thrainer
                                           (self.disk, self.instance),
1499 e43a624e Bernardo Dal Seno
                                           self.delta, False, True,
1500 e43a624e Bernardo Dal Seno
                                           self.node_es_flags[node_uuid])
1501 1c3231aa Thomas Thrainer
      result.Raise("Grow request failed to node %s" %
1502 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(node_uuid))
1503 763ad5be Thomas Thrainer
1504 763ad5be Thomas Thrainer
    # And now execute it for logical storage, on the primary node
1505 d0d7d7cf Thomas Thrainer
    node_uuid = self.instance.primary_node
1506 d0d7d7cf Thomas Thrainer
    result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
1507 e43a624e Bernardo Dal Seno
                                         self.delta, False, False,
1508 e43a624e Bernardo Dal Seno
                                         self.node_es_flags[node_uuid])
1509 1c3231aa Thomas Thrainer
    result.Raise("Grow request failed to node %s" %
1510 1c3231aa Thomas Thrainer
                 self.cfg.GetNodeName(node_uuid))
1511 763ad5be Thomas Thrainer
1512 d0d7d7cf Thomas Thrainer
    self.disk.RecordGrow(self.delta)
1513 d0d7d7cf Thomas Thrainer
    self.cfg.Update(self.instance, feedback_fn)
1514 763ad5be Thomas Thrainer
1515 763ad5be Thomas Thrainer
    # Changes have been recorded, release node lock
1516 5eacbcae Thomas Thrainer
    ReleaseLocks(self, locking.LEVEL_NODE)
1517 763ad5be Thomas Thrainer
1518 763ad5be Thomas Thrainer
    # Downgrade lock while waiting for sync
1519 763ad5be Thomas Thrainer
    self.glm.downgrade(locking.LEVEL_INSTANCE)
1520 763ad5be Thomas Thrainer
1521 763ad5be Thomas Thrainer
    assert wipe_disks ^ (old_disk_size is None)
1522 763ad5be Thomas Thrainer
1523 763ad5be Thomas Thrainer
    if wipe_disks:
1524 d0d7d7cf Thomas Thrainer
      assert self.instance.disks[self.op.disk] == self.disk
1525 763ad5be Thomas Thrainer
1526 763ad5be Thomas Thrainer
      # Wipe newly added disk space
1527 d0d7d7cf Thomas Thrainer
      WipeDisks(self, self.instance,
1528 d0d7d7cf Thomas Thrainer
                disks=[(self.op.disk, self.disk, old_disk_size)])
1529 763ad5be Thomas Thrainer
1530 763ad5be Thomas Thrainer
    if self.op.wait_for_sync:
1531 d0d7d7cf Thomas Thrainer
      disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
1532 763ad5be Thomas Thrainer
      if disk_abort:
1533 763ad5be Thomas Thrainer
        self.LogWarning("Disk syncing has not returned a good status; check"
1534 763ad5be Thomas Thrainer
                        " the instance")
1535 d0d7d7cf Thomas Thrainer
      if not self.instance.disks_active:
1536 d0d7d7cf Thomas Thrainer
        _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
1537 d0d7d7cf Thomas Thrainer
    elif not self.instance.disks_active:
1538 763ad5be Thomas Thrainer
      self.LogWarning("Not shutting down the disk even if the instance is"
1539 763ad5be Thomas Thrainer
                      " not supposed to be running because no wait for"
1540 763ad5be Thomas Thrainer
                      " sync mode was requested")
1541 763ad5be Thomas Thrainer
1542 763ad5be Thomas Thrainer
    assert self.owned_locks(locking.LEVEL_NODE_RES)
1543 d0d7d7cf Thomas Thrainer
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
1544 763ad5be Thomas Thrainer
1545 763ad5be Thomas Thrainer
1546 763ad5be Thomas Thrainer
class LUInstanceReplaceDisks(LogicalUnit):
1547 763ad5be Thomas Thrainer
  """Replace the disks of an instance.
1548 763ad5be Thomas Thrainer

1549 763ad5be Thomas Thrainer
  """
1550 763ad5be Thomas Thrainer
  HPATH = "mirrors-replace"
1551 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
1552 763ad5be Thomas Thrainer
  REQ_BGL = False
1553 763ad5be Thomas Thrainer
1554 763ad5be Thomas Thrainer
  def CheckArguments(self):
1555 763ad5be Thomas Thrainer
    """Check arguments.
1556 763ad5be Thomas Thrainer

1557 763ad5be Thomas Thrainer
    """
1558 763ad5be Thomas Thrainer
    if self.op.mode == constants.REPLACE_DISK_CHG:
1559 d0d7d7cf Thomas Thrainer
      if self.op.remote_node is None and self.op.iallocator is None:
1560 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("When changing the secondary either an"
1561 763ad5be Thomas Thrainer
                                   " iallocator script must be used or the"
1562 763ad5be Thomas Thrainer
                                   " new node given", errors.ECODE_INVAL)
1563 763ad5be Thomas Thrainer
      else:
1564 5eacbcae Thomas Thrainer
        CheckIAllocatorOrNode(self, "iallocator", "remote_node")
1565 763ad5be Thomas Thrainer
1566 d0d7d7cf Thomas Thrainer
    elif self.op.remote_node is not None or self.op.iallocator is not None:
1567 763ad5be Thomas Thrainer
      # Not replacing the secondary
1568 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The iallocator and new node options can"
1569 763ad5be Thomas Thrainer
                                 " only be used when changing the"
1570 763ad5be Thomas Thrainer
                                 " secondary node", errors.ECODE_INVAL)
1571 763ad5be Thomas Thrainer
1572 763ad5be Thomas Thrainer
  def ExpandNames(self):
1573 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1574 763ad5be Thomas Thrainer
1575 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODE not in self.needed_locks
1576 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODE_RES not in self.needed_locks
1577 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODEGROUP not in self.needed_locks
1578 763ad5be Thomas Thrainer
1579 763ad5be Thomas Thrainer
    assert self.op.iallocator is None or self.op.remote_node is None, \
1580 763ad5be Thomas Thrainer
      "Conflicting options"
1581 763ad5be Thomas Thrainer
1582 763ad5be Thomas Thrainer
    if self.op.remote_node is not None:
1583 1c3231aa Thomas Thrainer
      (self.op.remote_node_uuid, self.op.remote_node) = \
1584 1c3231aa Thomas Thrainer
        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
1585 1c3231aa Thomas Thrainer
                              self.op.remote_node)
1586 763ad5be Thomas Thrainer
1587 763ad5be Thomas Thrainer
      # Warning: do not remove the locking of the new secondary here
1588 1bb99a33 Bernardo Dal Seno
      # unless DRBD8Dev.AddChildren is changed to work in parallel;
1589 763ad5be Thomas Thrainer
      # currently it doesn't since parallel invocations of
1590 763ad5be Thomas Thrainer
      # FindUnusedMinor will conflict
1591 1c3231aa Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
1592 763ad5be Thomas Thrainer
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1593 763ad5be Thomas Thrainer
    else:
1594 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = []
1595 763ad5be Thomas Thrainer
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1596 763ad5be Thomas Thrainer
1597 763ad5be Thomas Thrainer
      if self.op.iallocator is not None:
1598 763ad5be Thomas Thrainer
        # iallocator will select a new node in the same group
1599 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
1600 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
1601 763ad5be Thomas Thrainer
1602 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1603 763ad5be Thomas Thrainer
1604 da4a52a3 Thomas Thrainer
    self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
1605 da4a52a3 Thomas Thrainer
                                   self.op.instance_name, self.op.mode,
1606 1c3231aa Thomas Thrainer
                                   self.op.iallocator, self.op.remote_node_uuid,
1607 763ad5be Thomas Thrainer
                                   self.op.disks, self.op.early_release,
1608 763ad5be Thomas Thrainer
                                   self.op.ignore_ipolicy)
1609 763ad5be Thomas Thrainer
1610 763ad5be Thomas Thrainer
    self.tasklets = [self.replacer]
1611 763ad5be Thomas Thrainer
1612 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1613 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODEGROUP:
1614 1c3231aa Thomas Thrainer
      assert self.op.remote_node_uuid is None
1615 763ad5be Thomas Thrainer
      assert self.op.iallocator is not None
1616 763ad5be Thomas Thrainer
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
1617 763ad5be Thomas Thrainer
1618 763ad5be Thomas Thrainer
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
1619 763ad5be Thomas Thrainer
      # Lock all groups used by instance optimistically; this requires going
1620 763ad5be Thomas Thrainer
      # via the node before it's locked, requiring verification later on
1621 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
1622 da4a52a3 Thomas Thrainer
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
1623 763ad5be Thomas Thrainer
1624 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE:
1625 763ad5be Thomas Thrainer
      if self.op.iallocator is not None:
1626 1c3231aa Thomas Thrainer
        assert self.op.remote_node_uuid is None
1627 763ad5be Thomas Thrainer
        assert not self.needed_locks[locking.LEVEL_NODE]
1628 763ad5be Thomas Thrainer
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
1629 763ad5be Thomas Thrainer
1630 763ad5be Thomas Thrainer
        # Lock member nodes of all locked groups
1631 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE] = \
1632 1c3231aa Thomas Thrainer
          [node_uuid
1633 763ad5be Thomas Thrainer
           for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
1634 1c3231aa Thomas Thrainer
           for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
1635 763ad5be Thomas Thrainer
      else:
1636 763ad5be Thomas Thrainer
        assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1637 763ad5be Thomas Thrainer
1638 763ad5be Thomas Thrainer
        self._LockInstancesNodes()
1639 763ad5be Thomas Thrainer
1640 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
1641 763ad5be Thomas Thrainer
      # Reuse node locks
1642 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1643 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE]
1644 763ad5be Thomas Thrainer
1645 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
1646 763ad5be Thomas Thrainer
    """Build hooks env.
1647 763ad5be Thomas Thrainer

1648 763ad5be Thomas Thrainer
    This runs on the master, the primary and all the secondaries.
1649 763ad5be Thomas Thrainer

1650 763ad5be Thomas Thrainer
    """
1651 763ad5be Thomas Thrainer
    instance = self.replacer.instance
1652 763ad5be Thomas Thrainer
    env = {
1653 763ad5be Thomas Thrainer
      "MODE": self.op.mode,
1654 763ad5be Thomas Thrainer
      "NEW_SECONDARY": self.op.remote_node,
1655 1c3231aa Thomas Thrainer
      "OLD_SECONDARY": self.cfg.GetNodeName(instance.secondary_nodes[0]),
1656 763ad5be Thomas Thrainer
      }
1657 5eacbcae Thomas Thrainer
    env.update(BuildInstanceHookEnvByObject(self, instance))
1658 763ad5be Thomas Thrainer
    return env
1659 763ad5be Thomas Thrainer
1660 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
1661 763ad5be Thomas Thrainer
    """Build hooks nodes.
1662 763ad5be Thomas Thrainer

1663 763ad5be Thomas Thrainer
    """
1664 763ad5be Thomas Thrainer
    instance = self.replacer.instance
1665 763ad5be Thomas Thrainer
    nl = [
1666 763ad5be Thomas Thrainer
      self.cfg.GetMasterNode(),
1667 763ad5be Thomas Thrainer
      instance.primary_node,
1668 763ad5be Thomas Thrainer
      ]
1669 1c3231aa Thomas Thrainer
    if self.op.remote_node_uuid is not None:
1670 1c3231aa Thomas Thrainer
      nl.append(self.op.remote_node_uuid)
1671 763ad5be Thomas Thrainer
    return nl, nl
1672 763ad5be Thomas Thrainer
1673 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1674 763ad5be Thomas Thrainer
    """Check prerequisites.
1675 763ad5be Thomas Thrainer

1676 763ad5be Thomas Thrainer
    """
1677 763ad5be Thomas Thrainer
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
1678 763ad5be Thomas Thrainer
            self.op.iallocator is None)
1679 763ad5be Thomas Thrainer
1680 763ad5be Thomas Thrainer
    # Verify if node group locks are still correct
1681 763ad5be Thomas Thrainer
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
1682 763ad5be Thomas Thrainer
    if owned_groups:
1683 da4a52a3 Thomas Thrainer
      CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
1684 763ad5be Thomas Thrainer
1685 763ad5be Thomas Thrainer
    return LogicalUnit.CheckPrereq(self)
1686 763ad5be Thomas Thrainer
1687 763ad5be Thomas Thrainer
1688 763ad5be Thomas Thrainer
class LUInstanceActivateDisks(NoHooksLU):
1689 763ad5be Thomas Thrainer
  """Bring up an instance's disks.
1690 763ad5be Thomas Thrainer

1691 763ad5be Thomas Thrainer
  """
1692 763ad5be Thomas Thrainer
  REQ_BGL = False
1693 763ad5be Thomas Thrainer
1694 763ad5be Thomas Thrainer
  def ExpandNames(self):
1695 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1696 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1697 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1698 763ad5be Thomas Thrainer
1699 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1700 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1701 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1702 763ad5be Thomas Thrainer
1703 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1704 763ad5be Thomas Thrainer
    """Check prerequisites.
1705 763ad5be Thomas Thrainer

1706 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1707 763ad5be Thomas Thrainer

1708 763ad5be Thomas Thrainer
    """
1709 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1710 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1711 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1712 5eacbcae Thomas Thrainer
    CheckNodeOnline(self, self.instance.primary_node)
1713 763ad5be Thomas Thrainer
1714 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1715 763ad5be Thomas Thrainer
    """Activate the disks.
1716 763ad5be Thomas Thrainer

1717 763ad5be Thomas Thrainer
    """
1718 763ad5be Thomas Thrainer
    disks_ok, disks_info = \
1719 5eacbcae Thomas Thrainer
              AssembleInstanceDisks(self, self.instance,
1720 5eacbcae Thomas Thrainer
                                    ignore_size=self.op.ignore_size)
1721 763ad5be Thomas Thrainer
    if not disks_ok:
1722 763ad5be Thomas Thrainer
      raise errors.OpExecError("Cannot activate block devices")
1723 763ad5be Thomas Thrainer
1724 763ad5be Thomas Thrainer
    if self.op.wait_for_sync:
1725 5eacbcae Thomas Thrainer
      if not WaitForSync(self, self.instance):
1726 da4a52a3 Thomas Thrainer
        self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
1727 763ad5be Thomas Thrainer
        raise errors.OpExecError("Some disks of the instance are degraded!")
1728 763ad5be Thomas Thrainer
1729 763ad5be Thomas Thrainer
    return disks_info
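  # Note on the return value: each disks_info entry describes one assembled
  # disk, roughly a (node, disk name, device path) triple, which is what e.g.
  # the "gnt-instance activate-disks" client prints back to the user
  # (example output only: "node1.example.com:disk/0:/dev/drbd0").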
1730 763ad5be Thomas Thrainer
1731 763ad5be Thomas Thrainer
1732 763ad5be Thomas Thrainer
class LUInstanceDeactivateDisks(NoHooksLU):
1733 763ad5be Thomas Thrainer
  """Shutdown an instance's disks.
1734 763ad5be Thomas Thrainer

1735 763ad5be Thomas Thrainer
  """
1736 763ad5be Thomas Thrainer
  REQ_BGL = False
1737 763ad5be Thomas Thrainer
1738 763ad5be Thomas Thrainer
  def ExpandNames(self):
1739 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1740 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1741 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1742 763ad5be Thomas Thrainer
1743 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1744 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1745 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1746 763ad5be Thomas Thrainer
1747 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1748 763ad5be Thomas Thrainer
    """Check prerequisites.
1749 763ad5be Thomas Thrainer

1750 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1751 763ad5be Thomas Thrainer

1752 763ad5be Thomas Thrainer
    """
1753 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1754 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1755 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1756 763ad5be Thomas Thrainer
1757 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1758 763ad5be Thomas Thrainer
    """Deactivate the disks
1759 763ad5be Thomas Thrainer

1760 763ad5be Thomas Thrainer
    """
1761 763ad5be Thomas Thrainer
    if self.op.force:
1762 d0d7d7cf Thomas Thrainer
      ShutdownInstanceDisks(self, self.instance)
1763 763ad5be Thomas Thrainer
    else:
1764 d0d7d7cf Thomas Thrainer
      _SafeShutdownInstanceDisks(self, self.instance)
1765 763ad5be Thomas Thrainer
1766 763ad5be Thomas Thrainer
1767 1c3231aa Thomas Thrainer
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
1768 763ad5be Thomas Thrainer
                               ldisk=False):
1769 763ad5be Thomas Thrainer
  """Check that mirrors are not degraded.
1770 763ad5be Thomas Thrainer

1771 763ad5be Thomas Thrainer
  @attention: The device has to be annotated already.
1772 763ad5be Thomas Thrainer

1773 763ad5be Thomas Thrainer
  The ldisk parameter, if True, will change the test from the
1774 763ad5be Thomas Thrainer
  is_degraded attribute (which represents overall non-ok status for
1775 763ad5be Thomas Thrainer
  the device(s)) to the ldisk (representing the local storage status).
1776 763ad5be Thomas Thrainer

1777 763ad5be Thomas Thrainer
  """
1778 763ad5be Thomas Thrainer
  result = True
1779 763ad5be Thomas Thrainer
1780 763ad5be Thomas Thrainer
  if on_primary or dev.AssembleOnSecondary():
1781 0c3d9c7c Thomas Thrainer
    rstats = lu.rpc.call_blockdev_find(node_uuid, (dev, instance))
1782 763ad5be Thomas Thrainer
    msg = rstats.fail_msg
1783 763ad5be Thomas Thrainer
    if msg:
1784 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s: %s",
1785 1c3231aa Thomas Thrainer
                    lu.cfg.GetNodeName(node_uuid), msg)
1786 763ad5be Thomas Thrainer
      result = False
1787 763ad5be Thomas Thrainer
    elif not rstats.payload:
1788 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
1789 763ad5be Thomas Thrainer
      result = False
1790 763ad5be Thomas Thrainer
    else:
1791 763ad5be Thomas Thrainer
      if ldisk:
1792 763ad5be Thomas Thrainer
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
1793 763ad5be Thomas Thrainer
      else:
1794 763ad5be Thomas Thrainer
        result = result and not rstats.payload.is_degraded
1795 763ad5be Thomas Thrainer
1796 763ad5be Thomas Thrainer
  if dev.children:
1797 763ad5be Thomas Thrainer
    for child in dev.children:
1798 1c3231aa Thomas Thrainer
      result = result and _CheckDiskConsistencyInner(lu, instance, child,
1799 1c3231aa Thomas Thrainer
                                                     node_uuid, on_primary)
1800 763ad5be Thomas Thrainer
1801 763ad5be Thomas Thrainer
  return result
1802 763ad5be Thomas Thrainer
1803 763ad5be Thomas Thrainer
1804 1c3231aa Thomas Thrainer
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
1805 763ad5be Thomas Thrainer
  """Wrapper around L{_CheckDiskConsistencyInner}.
1806 763ad5be Thomas Thrainer

1807 763ad5be Thomas Thrainer
  """
1808 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1809 1c3231aa Thomas Thrainer
  return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
1810 763ad5be Thomas Thrainer
                                    ldisk=ldisk)
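# A minimal usage sketch for the wrapper above (illustrative only; "lu" is a
# logical unit with cfg/rpc set up and "dev" is one of instance.disks):
#
#   mirror_ok = CheckDiskConsistency(lu, instance, dev,
#                                    instance.primary_node, True)
#   local_ok = CheckDiskConsistency(lu, instance, dev,
#                                   instance.primary_node, True, ldisk=True)
#
# The first call tests the overall is_degraded status, the second only the
# local storage (ldisk) status, as described in the docstring above.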
1811 763ad5be Thomas Thrainer
1812 763ad5be Thomas Thrainer
1813 1c3231aa Thomas Thrainer
def _BlockdevFind(lu, node_uuid, dev, instance):
1814 763ad5be Thomas Thrainer
  """Wrapper around call_blockdev_find to annotate diskparams.
1815 763ad5be Thomas Thrainer

1816 763ad5be Thomas Thrainer
  @param lu: A reference to the lu object
1817 1c3231aa Thomas Thrainer
  @param node_uuid: The node to call out
1818 763ad5be Thomas Thrainer
  @param dev: The device to find
1819 763ad5be Thomas Thrainer
  @param instance: The instance object the device belongs to
1820 763ad5be Thomas Thrainer
  @returns The result of the rpc call
1821 763ad5be Thomas Thrainer

1822 763ad5be Thomas Thrainer
  """
1823 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1824 0c3d9c7c Thomas Thrainer
  return lu.rpc.call_blockdev_find(node_uuid, (disk, instance))
1825 763ad5be Thomas Thrainer
1826 763ad5be Thomas Thrainer
1827 763ad5be Thomas Thrainer
def _GenerateUniqueNames(lu, exts):
1828 763ad5be Thomas Thrainer
  """Generate a suitable LV name.
1829 763ad5be Thomas Thrainer

1830 763ad5be Thomas Thrainer
  This will generate a logical volume name for each of the given suffixes.
1831 763ad5be Thomas Thrainer

1832 763ad5be Thomas Thrainer
  """
1833 763ad5be Thomas Thrainer
  results = []
1834 763ad5be Thomas Thrainer
  for val in exts:
1835 763ad5be Thomas Thrainer
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
1836 763ad5be Thomas Thrainer
    results.append("%s%s" % (new_id, val))
1837 763ad5be Thomas Thrainer
  return results
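# Purely illustrative example (the prefix comes from GenerateUniqueID and is
# not predictable): a call like
#
#   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
#
# returns something along the lines of ["<unique-id>.disk0_data",
# "<unique-id>.disk0_meta"]; these become the LV names that
# TLReplaceDisks._CreateNewStorage uses further down.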
1838 763ad5be Thomas Thrainer
1839 763ad5be Thomas Thrainer
1840 763ad5be Thomas Thrainer
class TLReplaceDisks(Tasklet):
1841 763ad5be Thomas Thrainer
  """Replaces disks for an instance.
1842 763ad5be Thomas Thrainer

1843 763ad5be Thomas Thrainer
  Note: Locking is not within the scope of this class.
1844 763ad5be Thomas Thrainer

1845 763ad5be Thomas Thrainer
  """
1846 da4a52a3 Thomas Thrainer
  def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
1847 da4a52a3 Thomas Thrainer
               remote_node_uuid, disks, early_release, ignore_ipolicy):
1848 763ad5be Thomas Thrainer
    """Initializes this class.
1849 763ad5be Thomas Thrainer

1850 763ad5be Thomas Thrainer
    """
1851 763ad5be Thomas Thrainer
    Tasklet.__init__(self, lu)
1852 763ad5be Thomas Thrainer
1853 763ad5be Thomas Thrainer
    # Parameters
1854 da4a52a3 Thomas Thrainer
    self.instance_uuid = instance_uuid
1855 763ad5be Thomas Thrainer
    self.instance_name = instance_name
1856 763ad5be Thomas Thrainer
    self.mode = mode
1857 763ad5be Thomas Thrainer
    self.iallocator_name = iallocator_name
1858 1c3231aa Thomas Thrainer
    self.remote_node_uuid = remote_node_uuid
1859 763ad5be Thomas Thrainer
    self.disks = disks
1860 763ad5be Thomas Thrainer
    self.early_release = early_release
1861 763ad5be Thomas Thrainer
    self.ignore_ipolicy = ignore_ipolicy
1862 763ad5be Thomas Thrainer
1863 763ad5be Thomas Thrainer
    # Runtime data
1864 763ad5be Thomas Thrainer
    self.instance = None
1865 1c3231aa Thomas Thrainer
    self.new_node_uuid = None
1866 1c3231aa Thomas Thrainer
    self.target_node_uuid = None
1867 1c3231aa Thomas Thrainer
    self.other_node_uuid = None
1868 763ad5be Thomas Thrainer
    self.remote_node_info = None
1869 763ad5be Thomas Thrainer
    self.node_secondary_ip = None
1870 763ad5be Thomas Thrainer
1871 763ad5be Thomas Thrainer
  @staticmethod
1872 da4a52a3 Thomas Thrainer
  def _RunAllocator(lu, iallocator_name, instance_uuid,
1873 1c3231aa Thomas Thrainer
                    relocate_from_node_uuids):
1874 763ad5be Thomas Thrainer
    """Compute a new secondary node using an IAllocator.
1875 763ad5be Thomas Thrainer

1876 763ad5be Thomas Thrainer
    """
1877 1c3231aa Thomas Thrainer
    req = iallocator.IAReqRelocate(
1878 da4a52a3 Thomas Thrainer
          inst_uuid=instance_uuid,
1879 1c3231aa Thomas Thrainer
          relocate_from_node_uuids=list(relocate_from_node_uuids))
1880 763ad5be Thomas Thrainer
    ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
1881 763ad5be Thomas Thrainer
1882 763ad5be Thomas Thrainer
    ial.Run(iallocator_name)
1883 763ad5be Thomas Thrainer
1884 763ad5be Thomas Thrainer
    if not ial.success:
1885 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
1886 763ad5be Thomas Thrainer
                                 " %s" % (iallocator_name, ial.info),
1887 763ad5be Thomas Thrainer
                                 errors.ECODE_NORES)
1888 763ad5be Thomas Thrainer
1889 763ad5be Thomas Thrainer
    remote_node_name = ial.result[0]
1890 1c3231aa Thomas Thrainer
    remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
1891 1c3231aa Thomas Thrainer
1892 1c3231aa Thomas Thrainer
    if remote_node is None:
1893 1c3231aa Thomas Thrainer
      raise errors.OpPrereqError("Node %s not found in configuration" %
1894 1c3231aa Thomas Thrainer
                                 remote_node_name, errors.ECODE_NOENT)
1895 763ad5be Thomas Thrainer
1896 763ad5be Thomas Thrainer
    lu.LogInfo("Selected new secondary for instance '%s': %s",
1897 da4a52a3 Thomas Thrainer
               instance_uuid, remote_node_name)
1898 763ad5be Thomas Thrainer
1899 1c3231aa Thomas Thrainer
    return remote_node.uuid
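  # Rough usage sketch (the allocator name is only an example; any configured
  # iallocator script, e.g. "hail", can be used): CheckPrereq below calls
  #
  #   remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
  #                                         self.instance.uuid,
  #                                         self.instance.secondary_nodes)
  #
  # and then treats the returned UUID as the new secondary candidate.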
1900 763ad5be Thomas Thrainer
1901 1c3231aa Thomas Thrainer
  def _FindFaultyDisks(self, node_uuid):
1902 5eacbcae Thomas Thrainer
    """Wrapper for L{FindFaultyInstanceDisks}.
1903 763ad5be Thomas Thrainer

1904 763ad5be Thomas Thrainer
    """
1905 5eacbcae Thomas Thrainer
    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
1906 1c3231aa Thomas Thrainer
                                   node_uuid, True)
1907 763ad5be Thomas Thrainer
1908 763ad5be Thomas Thrainer
  def _CheckDisksActivated(self, instance):
1909 763ad5be Thomas Thrainer
    """Checks if the instance disks are activated.
1910 763ad5be Thomas Thrainer

1911 763ad5be Thomas Thrainer
    @param instance: The instance to check disks
1912 763ad5be Thomas Thrainer
    @return: True if they are activated, False otherwise
1913 763ad5be Thomas Thrainer

1914 763ad5be Thomas Thrainer
    """
1915 1c3231aa Thomas Thrainer
    node_uuids = instance.all_nodes
1916 763ad5be Thomas Thrainer
1917 763ad5be Thomas Thrainer
    for idx, dev in enumerate(instance.disks):
1918 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
1919 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
1920 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
1921 763ad5be Thomas Thrainer
1922 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, instance)
1923 763ad5be Thomas Thrainer
1924 763ad5be Thomas Thrainer
        if result.offline:
1925 763ad5be Thomas Thrainer
          continue
1926 763ad5be Thomas Thrainer
        elif result.fail_msg or not result.payload:
1927 763ad5be Thomas Thrainer
          return False
1928 763ad5be Thomas Thrainer
1929 763ad5be Thomas Thrainer
    return True
1930 763ad5be Thomas Thrainer
1931 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1932 763ad5be Thomas Thrainer
    """Check prerequisites.
1933 763ad5be Thomas Thrainer

1934 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1935 763ad5be Thomas Thrainer

1936 763ad5be Thomas Thrainer
    """
1937 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
1938 d0d7d7cf Thomas Thrainer
    assert self.instance is not None, \
1939 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.instance_name
1940 763ad5be Thomas Thrainer
1941 d0d7d7cf Thomas Thrainer
    if self.instance.disk_template != constants.DT_DRBD8:
1942 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
1943 763ad5be Thomas Thrainer
                                 " instances", errors.ECODE_INVAL)
1944 763ad5be Thomas Thrainer
1945 d0d7d7cf Thomas Thrainer
    if len(self.instance.secondary_nodes) != 1:
1946 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The instance has a strange layout,"
1947 763ad5be Thomas Thrainer
                                 " expected one secondary but found %d" %
1948 d0d7d7cf Thomas Thrainer
                                 len(self.instance.secondary_nodes),
1949 763ad5be Thomas Thrainer
                                 errors.ECODE_FAULT)
1950 763ad5be Thomas Thrainer
1951 d0d7d7cf Thomas Thrainer
    secondary_node_uuid = self.instance.secondary_nodes[0]
1952 763ad5be Thomas Thrainer
1953 763ad5be Thomas Thrainer
    if self.iallocator_name is None:
1954 1c3231aa Thomas Thrainer
      remote_node_uuid = self.remote_node_uuid
1955 763ad5be Thomas Thrainer
    else:
1956 1c3231aa Thomas Thrainer
      remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
1957 da4a52a3 Thomas Thrainer
                                            self.instance.uuid,
1958 d0d7d7cf Thomas Thrainer
                                            self.instance.secondary_nodes)
1959 763ad5be Thomas Thrainer
1960 1c3231aa Thomas Thrainer
    if remote_node_uuid is None:
1961 763ad5be Thomas Thrainer
      self.remote_node_info = None
1962 763ad5be Thomas Thrainer
    else:
1963 1c3231aa Thomas Thrainer
      assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
1964 1c3231aa Thomas Thrainer
             "Remote node '%s' is not locked" % remote_node_uuid
1965 763ad5be Thomas Thrainer
1966 1c3231aa Thomas Thrainer
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
1967 763ad5be Thomas Thrainer
      assert self.remote_node_info is not None, \
1968 1c3231aa Thomas Thrainer
        "Cannot retrieve locked node %s" % remote_node_uuid
1969 763ad5be Thomas Thrainer
1970 1c3231aa Thomas Thrainer
    if remote_node_uuid == self.instance.primary_node:
1971 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is the primary node of"
1972 763ad5be Thomas Thrainer
                                 " the instance", errors.ECODE_INVAL)
1973 763ad5be Thomas Thrainer
1974 1c3231aa Thomas Thrainer
    if remote_node_uuid == secondary_node_uuid:
1975 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is already the"
1976 763ad5be Thomas Thrainer
                                 " secondary node of the instance",
1977 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
1978 763ad5be Thomas Thrainer
1979 763ad5be Thomas Thrainer
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
1980 763ad5be Thomas Thrainer
                                    constants.REPLACE_DISK_CHG):
1981 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
1982 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
1983 763ad5be Thomas Thrainer
1984 763ad5be Thomas Thrainer
    if self.mode == constants.REPLACE_DISK_AUTO:
1985 d0d7d7cf Thomas Thrainer
      if not self._CheckDisksActivated(self.instance):
1986 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
1987 763ad5be Thomas Thrainer
                                   " first" % self.instance_name,
1988 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
1989 d0d7d7cf Thomas Thrainer
      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
1990 1c3231aa Thomas Thrainer
      faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
1991 763ad5be Thomas Thrainer
1992 763ad5be Thomas Thrainer
      if faulty_primary and faulty_secondary:
1993 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
1994 763ad5be Thomas Thrainer
                                   " one node and can not be repaired"
1995 763ad5be Thomas Thrainer
                                   " automatically" % self.instance_name,
1996 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
1997 763ad5be Thomas Thrainer
1998 763ad5be Thomas Thrainer
      if faulty_primary:
1999 763ad5be Thomas Thrainer
        self.disks = faulty_primary
2000 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2001 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2002 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2003 763ad5be Thomas Thrainer
      elif faulty_secondary:
2004 763ad5be Thomas Thrainer
        self.disks = faulty_secondary
2005 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2006 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2007 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2008 763ad5be Thomas Thrainer
      else:
2009 763ad5be Thomas Thrainer
        self.disks = []
2010 763ad5be Thomas Thrainer
        check_nodes = []
2011 763ad5be Thomas Thrainer
2012 763ad5be Thomas Thrainer
    else:
2013 763ad5be Thomas Thrainer
      # Non-automatic modes
2014 763ad5be Thomas Thrainer
      if self.mode == constants.REPLACE_DISK_PRI:
2015 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2016 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2017 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2018 763ad5be Thomas Thrainer
2019 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_SEC:
2020 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2021 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2022 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2023 763ad5be Thomas Thrainer
2024 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_CHG:
2025 1c3231aa Thomas Thrainer
        self.new_node_uuid = remote_node_uuid
2026 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2027 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2028 1c3231aa Thomas Thrainer
        check_nodes = [self.new_node_uuid, self.other_node_uuid]
2029 763ad5be Thomas Thrainer
2030 1c3231aa Thomas Thrainer
        CheckNodeNotDrained(self.lu, remote_node_uuid)
2031 1c3231aa Thomas Thrainer
        CheckNodeVmCapable(self.lu, remote_node_uuid)
2032 763ad5be Thomas Thrainer
2033 1c3231aa Thomas Thrainer
        old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
2034 763ad5be Thomas Thrainer
        assert old_node_info is not None
2035 763ad5be Thomas Thrainer
        if old_node_info.offline and not self.early_release:
2036 763ad5be Thomas Thrainer
          # doesn't make sense to delay the release
2037 763ad5be Thomas Thrainer
          self.early_release = True
2038 763ad5be Thomas Thrainer
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
2039 1c3231aa Thomas Thrainer
                          " early-release mode", secondary_node_uuid)
2040 763ad5be Thomas Thrainer
2041 763ad5be Thomas Thrainer
      else:
2042 763ad5be Thomas Thrainer
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
2043 763ad5be Thomas Thrainer
                                     self.mode)
2044 763ad5be Thomas Thrainer
2045 763ad5be Thomas Thrainer
      # If not specified all disks should be replaced
2046 763ad5be Thomas Thrainer
      if not self.disks:
2047 763ad5be Thomas Thrainer
        self.disks = range(len(self.instance.disks))
2048 763ad5be Thomas Thrainer
2049 763ad5be Thomas Thrainer
    # TODO: This is ugly, but right now we can't distinguish between an
2050 763ad5be Thomas Thrainer
    # internally submitted opcode and an external one. We should fix that.
2051 763ad5be Thomas Thrainer
    if self.remote_node_info:
2052 763ad5be Thomas Thrainer
      # We change the node, let's verify it still meets instance policy
2053 763ad5be Thomas Thrainer
      new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
2054 763ad5be Thomas Thrainer
      cluster = self.cfg.GetClusterInfo()
2055 763ad5be Thomas Thrainer
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2056 763ad5be Thomas Thrainer
                                                              new_group_info)
2057 d0d7d7cf Thomas Thrainer
      CheckTargetNodeIPolicy(self, ipolicy, self.instance,
2058 d0d7d7cf Thomas Thrainer
                             self.remote_node_info, self.cfg,
2059 d0d7d7cf Thomas Thrainer
                             ignore=self.ignore_ipolicy)
2060 763ad5be Thomas Thrainer
2061 1c3231aa Thomas Thrainer
    for node_uuid in check_nodes:
2062 1c3231aa Thomas Thrainer
      CheckNodeOnline(self.lu, node_uuid)
2063 763ad5be Thomas Thrainer
2064 1c3231aa Thomas Thrainer
    touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
2065 1c3231aa Thomas Thrainer
                                                          self.other_node_uuid,
2066 1c3231aa Thomas Thrainer
                                                          self.target_node_uuid]
2067 1c3231aa Thomas Thrainer
                              if node_uuid is not None)
2068 763ad5be Thomas Thrainer
2069 763ad5be Thomas Thrainer
    # Release unneeded node and node resource locks
2070 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
2071 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
2072 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
2073 763ad5be Thomas Thrainer
2074 763ad5be Thomas Thrainer
    # Release any owned node group
2075 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
2076 763ad5be Thomas Thrainer
2077 763ad5be Thomas Thrainer
    # Check whether disks are valid
2078 763ad5be Thomas Thrainer
    for disk_idx in self.disks:
2079 d0d7d7cf Thomas Thrainer
      self.instance.FindDisk(disk_idx)
2080 763ad5be Thomas Thrainer
2081 763ad5be Thomas Thrainer
    # Get secondary node IP addresses
2082 1c3231aa Thomas Thrainer
    self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
2083 763ad5be Thomas Thrainer
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
2084 763ad5be Thomas Thrainer
2085 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
2086 763ad5be Thomas Thrainer
    """Execute disk replacement.
2087 763ad5be Thomas Thrainer

2088 763ad5be Thomas Thrainer
    This dispatches the disk replacement to the appropriate handler.
2089 763ad5be Thomas Thrainer

2090 763ad5be Thomas Thrainer
    """
2091 763ad5be Thomas Thrainer
    if __debug__:
2092 763ad5be Thomas Thrainer
      # Verify owned locks before starting operation
2093 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
2094 763ad5be Thomas Thrainer
      assert set(owned_nodes) == set(self.node_secondary_ip), \
2095 763ad5be Thomas Thrainer
          ("Incorrect node locks, owning %s, expected %s" %
2096 763ad5be Thomas Thrainer
           (owned_nodes, self.node_secondary_ip.keys()))
2097 763ad5be Thomas Thrainer
      assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
2098 763ad5be Thomas Thrainer
              self.lu.owned_locks(locking.LEVEL_NODE_RES))
2099 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
2100 763ad5be Thomas Thrainer
2101 763ad5be Thomas Thrainer
      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
2102 763ad5be Thomas Thrainer
      assert list(owned_instances) == [self.instance_name], \
2103 763ad5be Thomas Thrainer
          "Instance '%s' not locked" % self.instance_name
2104 763ad5be Thomas Thrainer
2105 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
2106 763ad5be Thomas Thrainer
          "Should not own any node group lock at this point"
2107 763ad5be Thomas Thrainer
2108 763ad5be Thomas Thrainer
    if not self.disks:
2109 763ad5be Thomas Thrainer
      feedback_fn("No disks need replacement for instance '%s'" %
2110 763ad5be Thomas Thrainer
                  self.instance.name)
2111 763ad5be Thomas Thrainer
      return
2112 763ad5be Thomas Thrainer
2113 763ad5be Thomas Thrainer
    feedback_fn("Replacing disk(s) %s for instance '%s'" %
2114 763ad5be Thomas Thrainer
                (utils.CommaJoin(self.disks), self.instance.name))
2115 1c3231aa Thomas Thrainer
    feedback_fn("Current primary node: %s" %
2116 1c3231aa Thomas Thrainer
                self.cfg.GetNodeName(self.instance.primary_node))
2117 763ad5be Thomas Thrainer
    feedback_fn("Current seconary node: %s" %
2118 1c3231aa Thomas Thrainer
                utils.CommaJoin(self.cfg.GetNodeNames(
2119 1c3231aa Thomas Thrainer
                                  self.instance.secondary_nodes)))
2120 763ad5be Thomas Thrainer
2121 1d4a4b26 Thomas Thrainer
    activate_disks = not self.instance.disks_active
2122 763ad5be Thomas Thrainer
2123 763ad5be Thomas Thrainer
    # Activate the instance disks if we're replacing them on a down instance
2124 763ad5be Thomas Thrainer
    if activate_disks:
2125 5eacbcae Thomas Thrainer
      StartInstanceDisks(self.lu, self.instance, True)
2126 763ad5be Thomas Thrainer
2127 763ad5be Thomas Thrainer
    try:
2128 763ad5be Thomas Thrainer
      # Should we replace the secondary node?
2129 1c3231aa Thomas Thrainer
      if self.new_node_uuid is not None:
2130 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8Secondary
2131 763ad5be Thomas Thrainer
      else:
2132 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8DiskOnly
2133 763ad5be Thomas Thrainer
2134 763ad5be Thomas Thrainer
      result = fn(feedback_fn)
2135 763ad5be Thomas Thrainer
    finally:
2136 763ad5be Thomas Thrainer
      # Deactivate the instance disks if we're replacing them on a
2137 763ad5be Thomas Thrainer
      # down instance
2138 763ad5be Thomas Thrainer
      if activate_disks:
2139 763ad5be Thomas Thrainer
        _SafeShutdownInstanceDisks(self.lu, self.instance)
2140 763ad5be Thomas Thrainer
2141 763ad5be Thomas Thrainer
    assert not self.lu.owned_locks(locking.LEVEL_NODE)
2142 763ad5be Thomas Thrainer
2143 763ad5be Thomas Thrainer
    if __debug__:
2144 763ad5be Thomas Thrainer
      # Verify owned locks
2145 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
2146 763ad5be Thomas Thrainer
      nodes = frozenset(self.node_secondary_ip)
2147 763ad5be Thomas Thrainer
      assert ((self.early_release and not owned_nodes) or
2148 763ad5be Thomas Thrainer
              (not self.early_release and not (set(owned_nodes) - nodes))), \
2149 763ad5be Thomas Thrainer
        ("Not owning the correct locks, early_release=%s, owned=%r,"
2150 763ad5be Thomas Thrainer
         " nodes=%r" % (self.early_release, owned_nodes, nodes))
2151 763ad5be Thomas Thrainer
2152 763ad5be Thomas Thrainer
    return result
2153 763ad5be Thomas Thrainer
2154 1c3231aa Thomas Thrainer
  def _CheckVolumeGroup(self, node_uuids):
2155 763ad5be Thomas Thrainer
    self.lu.LogInfo("Checking volume groups")
2156 763ad5be Thomas Thrainer
2157 763ad5be Thomas Thrainer
    vgname = self.cfg.GetVGName()
2158 763ad5be Thomas Thrainer
2159 763ad5be Thomas Thrainer
    # Make sure volume group exists on all involved nodes
2160 1c3231aa Thomas Thrainer
    results = self.rpc.call_vg_list(node_uuids)
2161 763ad5be Thomas Thrainer
    if not results:
2162 763ad5be Thomas Thrainer
      raise errors.OpExecError("Can't list volume groups on the nodes")
2163 763ad5be Thomas Thrainer
2164 1c3231aa Thomas Thrainer
    for node_uuid in node_uuids:
2165 1c3231aa Thomas Thrainer
      res = results[node_uuid]
2166 1c3231aa Thomas Thrainer
      res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
2167 763ad5be Thomas Thrainer
      if vgname not in res.payload:
2168 763ad5be Thomas Thrainer
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
2169 1c3231aa Thomas Thrainer
                                 (vgname, self.cfg.GetNodeName(node_uuid)))
2170 763ad5be Thomas Thrainer
2171 1c3231aa Thomas Thrainer
  def _CheckDisksExistence(self, node_uuids):
2172 763ad5be Thomas Thrainer
    # Check disk existence
2173 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2174 763ad5be Thomas Thrainer
      if idx not in self.disks:
2175 763ad5be Thomas Thrainer
        continue
2176 763ad5be Thomas Thrainer
2177 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
2178 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
2179 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
2180 763ad5be Thomas Thrainer
2181 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, self.instance)
2182 763ad5be Thomas Thrainer
2183 763ad5be Thomas Thrainer
        msg = result.fail_msg
2184 763ad5be Thomas Thrainer
        if msg or not result.payload:
2185 763ad5be Thomas Thrainer
          if not msg:
2186 763ad5be Thomas Thrainer
            msg = "disk not found"
2187 763ad5be Thomas Thrainer
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
2188 1c3231aa Thomas Thrainer
                                   (idx, self.cfg.GetNodeName(node_uuid), msg))
2189 763ad5be Thomas Thrainer
2190 1c3231aa Thomas Thrainer
  def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
2191 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2192 763ad5be Thomas Thrainer
      if idx not in self.disks:
2193 763ad5be Thomas Thrainer
        continue
2194 763ad5be Thomas Thrainer
2195 763ad5be Thomas Thrainer
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
2196 1c3231aa Thomas Thrainer
                      (idx, self.cfg.GetNodeName(node_uuid)))
2197 763ad5be Thomas Thrainer
2198 1c3231aa Thomas Thrainer
      if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
2199 5eacbcae Thomas Thrainer
                                  on_primary, ldisk=ldisk):
2200 763ad5be Thomas Thrainer
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
2201 763ad5be Thomas Thrainer
                                 " replace disks for instance %s" %
2202 1c3231aa Thomas Thrainer
                                 (self.cfg.GetNodeName(node_uuid),
2203 1c3231aa Thomas Thrainer
                                  self.instance.name))
2204 763ad5be Thomas Thrainer
2205 1c3231aa Thomas Thrainer
  def _CreateNewStorage(self, node_uuid):
2206 763ad5be Thomas Thrainer
    """Create new storage on the primary or secondary node.
2207 763ad5be Thomas Thrainer

2208 763ad5be Thomas Thrainer
    This is only used for same-node replaces, not for changing the
2209 763ad5be Thomas Thrainer
    secondary node, hence we don't want to modify the existing disk.
2210 763ad5be Thomas Thrainer

2211 763ad5be Thomas Thrainer
    """
2212 763ad5be Thomas Thrainer
    iv_names = {}
2213 763ad5be Thomas Thrainer
2214 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2215 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2216 763ad5be Thomas Thrainer
      if idx not in self.disks:
2217 763ad5be Thomas Thrainer
        continue
2218 763ad5be Thomas Thrainer
2219 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding storage on %s for disk/%d",
2220 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(node_uuid), idx)
2221 763ad5be Thomas Thrainer
2222 763ad5be Thomas Thrainer
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
2223 763ad5be Thomas Thrainer
      names = _GenerateUniqueNames(self.lu, lv_names)
2224 763ad5be Thomas Thrainer
2225 763ad5be Thomas Thrainer
      (data_disk, meta_disk) = dev.children
2226 763ad5be Thomas Thrainer
      vg_data = data_disk.logical_id[0]
2227 cd3b4ff4 Helga Velroyen
      lv_data = objects.Disk(dev_type=constants.DT_PLAIN, size=dev.size,
2228 763ad5be Thomas Thrainer
                             logical_id=(vg_data, names[0]),
2229 763ad5be Thomas Thrainer
                             params=data_disk.params)
2230 763ad5be Thomas Thrainer
      vg_meta = meta_disk.logical_id[0]
2231 cd3b4ff4 Helga Velroyen
      lv_meta = objects.Disk(dev_type=constants.DT_PLAIN,
2232 763ad5be Thomas Thrainer
                             size=constants.DRBD_META_SIZE,
2233 763ad5be Thomas Thrainer
                             logical_id=(vg_meta, names[1]),
2234 763ad5be Thomas Thrainer
                             params=meta_disk.params)
2235 763ad5be Thomas Thrainer
2236 763ad5be Thomas Thrainer
      new_lvs = [lv_data, lv_meta]
2237 763ad5be Thomas Thrainer
      old_lvs = [child.Copy() for child in dev.children]
2238 763ad5be Thomas Thrainer
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
2239 1c3231aa Thomas Thrainer
      excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
2240 763ad5be Thomas Thrainer
2241 763ad5be Thomas Thrainer
      # we pass force_create=True to force the LVM creation
2242 763ad5be Thomas Thrainer
      for new_lv in new_lvs:
2243 f2b58d93 Thomas Thrainer
        try:
2244 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
2245 f2b58d93 Thomas Thrainer
                               GetInstanceInfoText(self.instance), False,
2246 f2b58d93 Thomas Thrainer
                               excl_stor)
2247 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2248 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2249 763ad5be Thomas Thrainer
2250 763ad5be Thomas Thrainer
    return iv_names
2251 763ad5be Thomas Thrainer
2252 1c3231aa Thomas Thrainer
  def _CheckDevices(self, node_uuid, iv_names):
2253 763ad5be Thomas Thrainer
    for name, (dev, _, _) in iv_names.iteritems():
2254 1c3231aa Thomas Thrainer
      result = _BlockdevFind(self, node_uuid, dev, self.instance)
2255 763ad5be Thomas Thrainer
2256 763ad5be Thomas Thrainer
      msg = result.fail_msg
2257 763ad5be Thomas Thrainer
      if msg or not result.payload:
2258 763ad5be Thomas Thrainer
        if not msg:
2259 763ad5be Thomas Thrainer
          msg = "disk not found"
2260 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
2261 763ad5be Thomas Thrainer
                                 (name, msg))
2262 763ad5be Thomas Thrainer
2263 763ad5be Thomas Thrainer
      if result.payload.is_degraded:
2264 763ad5be Thomas Thrainer
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
2265 763ad5be Thomas Thrainer
2266 1c3231aa Thomas Thrainer
  def _RemoveOldStorage(self, node_uuid, iv_names):
2267 763ad5be Thomas Thrainer
    for name, (_, old_lvs, _) in iv_names.iteritems():
2268 763ad5be Thomas Thrainer
      self.lu.LogInfo("Remove logical volumes for %s", name)
2269 763ad5be Thomas Thrainer
2270 763ad5be Thomas Thrainer
      for lv in old_lvs:
2271 0c3d9c7c Thomas Thrainer
        msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
2272 0c3d9c7c Thomas Thrainer
                .fail_msg
2273 763ad5be Thomas Thrainer
        if msg:
2274 763ad5be Thomas Thrainer
          self.lu.LogWarning("Can't remove old LV: %s", msg,
2275 763ad5be Thomas Thrainer
                             hint="remove unused LVs manually")
2276 763ad5be Thomas Thrainer
2277 763ad5be Thomas Thrainer
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
2278 763ad5be Thomas Thrainer
    """Replace a disk on the primary or secondary for DRBD 8.
2279 763ad5be Thomas Thrainer

2280 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2281 763ad5be Thomas Thrainer

2282 763ad5be Thomas Thrainer
      1. for each disk to be replaced:
2283 763ad5be Thomas Thrainer

2284 763ad5be Thomas Thrainer
        1. create new LVs on the target node with unique names
2285 763ad5be Thomas Thrainer
        1. detach old LVs from the drbd device
2286 763ad5be Thomas Thrainer
        1. rename old LVs to name_replaced.<time_t>
2287 763ad5be Thomas Thrainer
        1. rename new LVs to old LVs
2288 763ad5be Thomas Thrainer
        1. attach the new LVs (with the old names now) to the drbd device
2289 763ad5be Thomas Thrainer

2290 763ad5be Thomas Thrainer
      1. wait for sync across all devices
2291 763ad5be Thomas Thrainer

2292 763ad5be Thomas Thrainer
      1. for each modified disk:
2293 763ad5be Thomas Thrainer

2294 763ad5be Thomas Thrainer
        1. remove old LVs (which have the name name_replaces.<time_t>)
2295 763ad5be Thomas Thrainer

2296 763ad5be Thomas Thrainer
    Failures are not very well handled.
2297 763ad5be Thomas Thrainer

2298 763ad5be Thomas Thrainer
    """
2299 763ad5be Thomas Thrainer
    steps_total = 6
2300 763ad5be Thomas Thrainer
2301 763ad5be Thomas Thrainer
    # Step: check device existence
2302 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2303 1c3231aa Thomas Thrainer
    self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
2304 1c3231aa Thomas Thrainer
    self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
2305 763ad5be Thomas Thrainer
2306 763ad5be Thomas Thrainer
    # Step: check other node consistency
2307 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2308 1c3231aa Thomas Thrainer
    self._CheckDisksConsistency(
2309 1c3231aa Thomas Thrainer
      self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
2310 1c3231aa Thomas Thrainer
      False)
2311 763ad5be Thomas Thrainer
2312 763ad5be Thomas Thrainer
    # Step: create new storage
2313 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2314 1c3231aa Thomas Thrainer
    iv_names = self._CreateNewStorage(self.target_node_uuid)
2315 763ad5be Thomas Thrainer
2316 763ad5be Thomas Thrainer
    # Step: for each lv, detach+rename*2+attach
2317 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2318 763ad5be Thomas Thrainer
    for dev, old_lvs, new_lvs in iv_names.itervalues():
2319 763ad5be Thomas Thrainer
      self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
2320 763ad5be Thomas Thrainer
2321 0c3d9c7c Thomas Thrainer
      result = self.rpc.call_blockdev_removechildren(self.target_node_uuid,
2322 0c3d9c7c Thomas Thrainer
                                                     (dev, self.instance),
2323 0c3d9c7c Thomas Thrainer
                                                     (old_lvs, self.instance))
2324 763ad5be Thomas Thrainer
      result.Raise("Can't detach drbd from local storage on node"
2325 1c3231aa Thomas Thrainer
                   " %s for device %s" %
2326 1c3231aa Thomas Thrainer
                   (self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
2327 763ad5be Thomas Thrainer
      #dev.children = []
2328 763ad5be Thomas Thrainer
      #cfg.Update(instance)
2329 763ad5be Thomas Thrainer
2330 763ad5be Thomas Thrainer
      # ok, we created the new LVs, so now we know we have the needed
2331 763ad5be Thomas Thrainer
      # storage; as such, we proceed on the target node to rename
2332 763ad5be Thomas Thrainer
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
2333 a57e502a Thomas Thrainer
      # using the assumption that logical_id == unique_id on that node
2334 763ad5be Thomas Thrainer
2335 763ad5be Thomas Thrainer
      # FIXME(iustin): use a better name for the replaced LVs
2336 763ad5be Thomas Thrainer
      temp_suffix = int(time.time())
2337 a57e502a Thomas Thrainer
      ren_fn = lambda d, suff: (d.logical_id[0],
2338 a57e502a Thomas Thrainer
                                d.logical_id[1] + "_replaced-%s" % suff)
2339 763ad5be Thomas Thrainer
2340 763ad5be Thomas Thrainer
      # Build the rename list based on what LVs exist on the node
2341 763ad5be Thomas Thrainer
      rename_old_to_new = []
2342 763ad5be Thomas Thrainer
      for to_ren in old_lvs:
2343 0c3d9c7c Thomas Thrainer
        result = self.rpc.call_blockdev_find(self.target_node_uuid,
2344 0c3d9c7c Thomas Thrainer
                                             (to_ren, self.instance))
2345 763ad5be Thomas Thrainer
        if not result.fail_msg and result.payload:
2346 763ad5be Thomas Thrainer
          # device exists
2347 763ad5be Thomas Thrainer
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
2348 763ad5be Thomas Thrainer
2349 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the old LVs on the target node")
2350 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2351 763ad5be Thomas Thrainer
                                             rename_old_to_new)
2352 1c3231aa Thomas Thrainer
      result.Raise("Can't rename old LVs on node %s" %
2353 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2354 763ad5be Thomas Thrainer
2355 763ad5be Thomas Thrainer
      # Now we rename the new LVs to the old LVs
2356 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the new LVs on the target node")
2357 a57e502a Thomas Thrainer
      rename_new_to_old = [(new, old.logical_id)
2358 763ad5be Thomas Thrainer
                           for old, new in zip(old_lvs, new_lvs)]
2359 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2360 763ad5be Thomas Thrainer
                                             rename_new_to_old)
2361 1c3231aa Thomas Thrainer
      result.Raise("Can't rename new LVs on node %s" %
2362 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2363 763ad5be Thomas Thrainer
2364 763ad5be Thomas Thrainer
      # Intermediate steps of in memory modifications
2365 763ad5be Thomas Thrainer
      for old, new in zip(old_lvs, new_lvs):
2366 763ad5be Thomas Thrainer
        new.logical_id = old.logical_id
2367 763ad5be Thomas Thrainer
2368 763ad5be Thomas Thrainer
      # We need to modify old_lvs so that removal later removes the
2369 763ad5be Thomas Thrainer
      # right LVs, not the newly added ones; note that old_lvs is a
2370 763ad5be Thomas Thrainer
      # copy here
2371 763ad5be Thomas Thrainer
      for disk in old_lvs:
2372 763ad5be Thomas Thrainer
        disk.logical_id = ren_fn(disk, temp_suffix)
2373 763ad5be Thomas Thrainer
2374 763ad5be Thomas Thrainer
      # Now that the new lvs have the old name, we can add them to the device
2375 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding new mirror component on %s",
2376 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(self.target_node_uuid))
2377 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
2378 0c3d9c7c Thomas Thrainer
                                                  (dev, self.instance),
2379 0c3d9c7c Thomas Thrainer
                                                  (new_lvs, self.instance))
2380 763ad5be Thomas Thrainer
      msg = result.fail_msg
2381 763ad5be Thomas Thrainer
      if msg:
2382 763ad5be Thomas Thrainer
        for new_lv in new_lvs:
2383 1c3231aa Thomas Thrainer
          msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
2384 0c3d9c7c Thomas Thrainer
                                               (new_lv, self.instance)).fail_msg
2385 763ad5be Thomas Thrainer
          if msg2:
2386 763ad5be Thomas Thrainer
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
2387 763ad5be Thomas Thrainer
                               hint=("cleanup manually the unused logical"
2388 763ad5be Thomas Thrainer
                                     "volumes"))
2389 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
2390 763ad5be Thomas Thrainer
2391 763ad5be Thomas Thrainer
    cstep = itertools.count(5)
2392 763ad5be Thomas Thrainer
2393 763ad5be Thomas Thrainer
    if self.early_release:
2394 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2395 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2396 763ad5be Thomas Thrainer
      # TODO: Check if releasing locks early still makes sense
2397 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
2398 763ad5be Thomas Thrainer
    else:
2399 763ad5be Thomas Thrainer
      # Release all resource locks except those used by the instance
2400 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
2401 5eacbcae Thomas Thrainer
                   keep=self.node_secondary_ip.keys())
2402 763ad5be Thomas Thrainer
2403 763ad5be Thomas Thrainer
    # Release all node locks while waiting for sync
2404 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE)
2405 763ad5be Thomas Thrainer
2406 763ad5be Thomas Thrainer
    # TODO: Can the instance lock be downgraded here? Take the optional disk
2407 763ad5be Thomas Thrainer
    # shutdown in the caller into consideration.
2408 763ad5be Thomas Thrainer
2409 763ad5be Thomas Thrainer
    # Wait for sync
2410 763ad5be Thomas Thrainer
    # This can fail as the old devices are degraded and _WaitForSync
2411 763ad5be Thomas Thrainer
    # does a combined result over all disks, so we don't check its return value
2412 763ad5be Thomas Thrainer
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
2413 5eacbcae Thomas Thrainer
    WaitForSync(self.lu, self.instance)
2414 763ad5be Thomas Thrainer
2415 763ad5be Thomas Thrainer
    # Check all devices manually
2416 763ad5be Thomas Thrainer
    self._CheckDevices(self.instance.primary_node, iv_names)
2417 763ad5be Thomas Thrainer
2418 763ad5be Thomas Thrainer
    # Step: remove old storage
2419 763ad5be Thomas Thrainer
    if not self.early_release:
2420 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2421 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2422 763ad5be Thomas Thrainer
2423 763ad5be Thomas Thrainer
  def _ExecDrbd8Secondary(self, feedback_fn):
2424 763ad5be Thomas Thrainer
    """Replace the secondary node for DRBD 8.
2425 763ad5be Thomas Thrainer

2426 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2427 763ad5be Thomas Thrainer
      - for all disks of the instance:
2428 763ad5be Thomas Thrainer
        - create new LVs on the new node with same names
2429 763ad5be Thomas Thrainer
        - shutdown the drbd device on the old secondary
2430 763ad5be Thomas Thrainer
        - disconnect the drbd network on the primary
2431 763ad5be Thomas Thrainer
        - create the drbd device on the new secondary
2432 763ad5be Thomas Thrainer
        - network attach the drbd on the primary, using an artifice:
2433 763ad5be Thomas Thrainer
          the drbd code for Attach() will connect to the network if it
2434 763ad5be Thomas Thrainer
          finds a device which is connected to the good local disks but
2435 763ad5be Thomas Thrainer
          not network enabled
2436 763ad5be Thomas Thrainer
      - wait for sync across all devices
2437 763ad5be Thomas Thrainer
      - remove all disks from the old secondary
2438 763ad5be Thomas Thrainer

2439 763ad5be Thomas Thrainer
    Failures are not very well handled.
2440 763ad5be Thomas Thrainer

2441 763ad5be Thomas Thrainer
    """
2442 763ad5be Thomas Thrainer
    steps_total = 6
2443 763ad5be Thomas Thrainer
2444 763ad5be Thomas Thrainer
    pnode = self.instance.primary_node
2445 763ad5be Thomas Thrainer
2446 763ad5be Thomas Thrainer
    # Step: check device existence
2447 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2448 763ad5be Thomas Thrainer
    self._CheckDisksExistence([self.instance.primary_node])
2449 763ad5be Thomas Thrainer
    self._CheckVolumeGroup([self.instance.primary_node])
2450 763ad5be Thomas Thrainer
2451 763ad5be Thomas Thrainer
    # Step: check other node consistency
2452 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2453 763ad5be Thomas Thrainer
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
2454 763ad5be Thomas Thrainer
2455 763ad5be Thomas Thrainer
    # Step: create new storage
2456 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2457 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2458 1c3231aa Thomas Thrainer
    excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
2459 1c3231aa Thomas Thrainer
                                                  self.new_node_uuid)
2460 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2461 763ad5be Thomas Thrainer
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
2462 1c3231aa Thomas Thrainer
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
2463 763ad5be Thomas Thrainer
      # we pass force_create=True to force LVM creation
2464 763ad5be Thomas Thrainer
      for new_lv in dev.children:
2465 f2b58d93 Thomas Thrainer
        try:
2466 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
2467 dad226e3 Thomas Thrainer
                               new_lv, True, GetInstanceInfoText(self.instance),
2468 dad226e3 Thomas Thrainer
                               False, excl_stor)
2469 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2470 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2471 763ad5be Thomas Thrainer
2472 763ad5be Thomas Thrainer
    # Step 4: drbd minors and drbd setup changes
2473 763ad5be Thomas Thrainer
    # after this, we must manually remove the drbd minors on both the
2474 763ad5be Thomas Thrainer
    # error and the success paths
2475 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2476 1c3231aa Thomas Thrainer
    minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
2477 1c3231aa Thomas Thrainer
                                         for _ in self.instance.disks],
2478 da4a52a3 Thomas Thrainer
                                        self.instance.uuid)
2479 763ad5be Thomas Thrainer
    logging.debug("Allocated minors %r", minors)
2480 763ad5be Thomas Thrainer
2481 763ad5be Thomas Thrainer
    iv_names = {}
2482 763ad5be Thomas Thrainer
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
2483 763ad5be Thomas Thrainer
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
2484 1c3231aa Thomas Thrainer
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
2485 763ad5be Thomas Thrainer
      # create new devices on new_node; note that we create two IDs:
2486 763ad5be Thomas Thrainer
      # one without port, so the drbd will be activated without
2487 763ad5be Thomas Thrainer
      # networking information on the new node at this stage, and one
2488 763ad5be Thomas Thrainer
      # with network, for the later activation in step 4
2489 763ad5be Thomas Thrainer
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
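      # For orientation (values are made up): a DRBD8 logical_id is the
      # 6-tuple (node_A_uuid, node_B_uuid, port, minor_A, minor_B, secret);
      # below, the primary keeps its existing minor and only the new
      # secondary's side gets one of the freshly allocated minors.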
2490 763ad5be Thomas Thrainer
      if self.instance.primary_node == o_node1:
2491 763ad5be Thomas Thrainer
        p_minor = o_minor1
2492 763ad5be Thomas Thrainer
      else:
2493 763ad5be Thomas Thrainer
        assert self.instance.primary_node == o_node2, "Three-node instance?"
2494 763ad5be Thomas Thrainer
        p_minor = o_minor2
2495 763ad5be Thomas Thrainer
2496 1c3231aa Thomas Thrainer
      new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
2497 763ad5be Thomas Thrainer
                      p_minor, new_minor, o_secret)
2498 1c3231aa Thomas Thrainer
      new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
2499 763ad5be Thomas Thrainer
                    p_minor, new_minor, o_secret)
2500 763ad5be Thomas Thrainer
2501 763ad5be Thomas Thrainer
      iv_names[idx] = (dev, dev.children, new_net_id)
2502 763ad5be Thomas Thrainer
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
2503 763ad5be Thomas Thrainer
                    new_net_id)
2504 cd3b4ff4 Helga Velroyen
      new_drbd = objects.Disk(dev_type=constants.DT_DRBD8,
2505 763ad5be Thomas Thrainer
                              logical_id=new_alone_id,
2506 763ad5be Thomas Thrainer
                              children=dev.children,
2507 763ad5be Thomas Thrainer
                              size=dev.size,
2508 763ad5be Thomas Thrainer
                              params={})
2509 5eacbcae Thomas Thrainer
      (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
2510 5eacbcae Thomas Thrainer
                                            self.cfg)
2511 763ad5be Thomas Thrainer
      try:
2512 1c3231aa Thomas Thrainer
        CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
2513 5eacbcae Thomas Thrainer
                             anno_new_drbd,
2514 5eacbcae Thomas Thrainer
                             GetInstanceInfoText(self.instance), False,
2515 5eacbcae Thomas Thrainer
                             excl_stor)
2516 763ad5be Thomas Thrainer
      except errors.GenericError:
2517 da4a52a3 Thomas Thrainer
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2518 763ad5be Thomas Thrainer
        raise
2519 763ad5be Thomas Thrainer
2520 763ad5be Thomas Thrainer
    # We have new devices, shutdown the drbd on the old secondary
2521 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2522 763ad5be Thomas Thrainer
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
2523 1c3231aa Thomas Thrainer
      msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
2524 763ad5be Thomas Thrainer
                                            (dev, self.instance)).fail_msg
2525 763ad5be Thomas Thrainer
      if msg:
2526 763ad5be Thomas Thrainer
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
2527 763ad5be Thomas Thrainer
                           "node: %s" % (idx, msg),
2528 763ad5be Thomas Thrainer
                           hint=("Please cleanup this device manually as"
2529 763ad5be Thomas Thrainer
                                 " soon as possible"))
2530 763ad5be Thomas Thrainer
2531 763ad5be Thomas Thrainer
    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
2532 0c3d9c7c Thomas Thrainer
    result = self.rpc.call_drbd_disconnect_net(
2533 0c3d9c7c Thomas Thrainer
               [pnode], (self.instance.disks, self.instance))[pnode]
2534 763ad5be Thomas Thrainer
2535 763ad5be Thomas Thrainer
    msg = result.fail_msg
2536 763ad5be Thomas Thrainer
    if msg:
2537 763ad5be Thomas Thrainer
      # detaches didn't succeed (unlikely)
2538 da4a52a3 Thomas Thrainer
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
2539 763ad5be Thomas Thrainer
      raise errors.OpExecError("Can't detach the disks from the network on"
2540 763ad5be Thomas Thrainer
                               " old node: %s" % (msg,))
2541 763ad5be Thomas Thrainer
2542 763ad5be Thomas Thrainer
    # if we managed to detach at least one, we update all the disks of
2543 763ad5be Thomas Thrainer
    # the instance to point to the new secondary
2544 763ad5be Thomas Thrainer
    self.lu.LogInfo("Updating instance configuration")
2545 763ad5be Thomas Thrainer
    for dev, _, new_logical_id in iv_names.itervalues():
2546 763ad5be Thomas Thrainer
      dev.logical_id = new_logical_id
2547 763ad5be Thomas Thrainer
2548 763ad5be Thomas Thrainer
    self.cfg.Update(self.instance, feedback_fn)
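    # From this point on the cluster configuration refers to the new secondary
    # for all disks of this instance.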
2549 763ad5be Thomas Thrainer
2550 763ad5be Thomas Thrainer
    # Release all node locks (the configuration has been updated)
2551 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE)
2552 763ad5be Thomas Thrainer
2553 763ad5be Thomas Thrainer
    # and now perform the drbd attach
2554 763ad5be Thomas Thrainer
    self.lu.LogInfo("Attaching primary drbds to new secondary"
2555 763ad5be Thomas Thrainer
                    " (standalone => connected)")
2556 763ad5be Thomas Thrainer
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
2557 1c3231aa Thomas Thrainer
                                            self.new_node_uuid],
2558 763ad5be Thomas Thrainer
                                           (self.instance.disks, self.instance),
2559 763ad5be Thomas Thrainer
                                           self.instance.name,
2560 763ad5be Thomas Thrainer
                                           False)
2561 763ad5be Thomas Thrainer
    for to_node, to_result in result.items():
2562 763ad5be Thomas Thrainer
      msg = to_result.fail_msg
2563 763ad5be Thomas Thrainer
      if msg:
2564 763ad5be Thomas Thrainer
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
2565 1c3231aa Thomas Thrainer
                           self.cfg.GetNodeName(to_node), msg,
2566 763ad5be Thomas Thrainer
                           hint=("please do a gnt-instance info to see the"
2567 763ad5be Thomas Thrainer
                                 " status of disks"))
2568 763ad5be Thomas Thrainer
2569 763ad5be Thomas Thrainer
    cstep = itertools.count(5)
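    # The remaining step numbers are generated dynamically because the order
    # of "remove old storage" and "sync devices" depends on early_release.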
2570 763ad5be Thomas Thrainer
2571 763ad5be Thomas Thrainer
    if self.early_release:
2572 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2573 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2574 763ad5be Thomas Thrainer
      # TODO: Check if releasing locks early still makes sense
2575 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
2576 763ad5be Thomas Thrainer
    else:
2577 763ad5be Thomas Thrainer
      # Release all resource locks except those used by the instance
2578 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
2579 5eacbcae Thomas Thrainer
                   keep=self.node_secondary_ip.keys())
2580 763ad5be Thomas Thrainer
2581 763ad5be Thomas Thrainer
    # TODO: Can the instance lock be downgraded here? Take the optional disk
2582 763ad5be Thomas Thrainer
    # shutdown in the caller into consideration.
2583 763ad5be Thomas Thrainer
2584 763ad5be Thomas Thrainer
    # Wait for sync
2585 763ad5be Thomas Thrainer
    # This can fail as the old devices are degraded and _WaitForSync
2586 763ad5be Thomas Thrainer
    # does a combined result over all disks, so we don't check its return value
2587 763ad5be Thomas Thrainer
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
2588 5eacbcae Thomas Thrainer
    WaitForSync(self.lu, self.instance)
2589 763ad5be Thomas Thrainer
2590 763ad5be Thomas Thrainer
    # Check all devices manually
2591 763ad5be Thomas Thrainer
    self._CheckDevices(self.instance.primary_node, iv_names)
2592 763ad5be Thomas Thrainer
2593 763ad5be Thomas Thrainer
    # Step: remove old storage
2594 763ad5be Thomas Thrainer
    if not self.early_release:
2595 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2596 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)