#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with storage of instances."""

import itertools
import logging
import os
import time

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
from ganeti import rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
  CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
  CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy

import ganeti.masterd.instance


_DISK_TEMPLATE_NAME_PREFIX = {
  constants.DT_PLAIN: "",
  constants.DT_RBD: ".rbd",
  constants.DT_EXT: ".ext",
  }


_DISK_TEMPLATE_DEVICE_TYPE = {
  constants.DT_PLAIN: constants.LD_LV,
  constants.DT_FILE: constants.LD_FILE,
  constants.DT_SHARED_FILE: constants.LD_FILE,
  constants.DT_BLOCK: constants.LD_BLOCKDEV,
  constants.DT_RBD: constants.LD_RBD,
  constants.DT_EXT: constants.LD_EXT,
  }


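# Illustrative note (not part of the original module): these two maps drive
# GenerateDiskTemplate below. Assuming base_index 0, an RBD disk receives a
# unique name of the form "<unique-id>.rbd.disk0" (via the ".rbd" prefix)
# and is instantiated as a constants.LD_RBD device; DT_DRBD8 is absent from
# both maps because its device tree is built separately by
# _GenerateDRBD8Branch.

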
def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  """
  lu.cfg.SetDiskID(device, node_uuid)
  result = lu.rpc.call_blockdev_create(node_uuid, device, device.size,
                                       instance.name, force_open, info,
                                       excl_stor)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device,
                                             lu.cfg.GetNodeName(node_uuid),
                                             instance.name))
  if device.physical_id is None:
    device.physical_id = result.payload


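# Illustrative note (not part of the original module): the payload of the
# blockdev_create RPC above carries the identifiers assigned on the target
# node; recording it in device.physical_id (only while still unset) is what
# lets later RPCs address the device on that node.

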
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
                         info, force_open, excl_stor):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @attention: The device has to be annotated already.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device which has
      CreateOnSecondary() attribute
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  @return: list of created devices
  """
  created_devices = []
  try:
    if device.CreateOnSecondary():
      force_create = True

    if device.children:
      for child in device.children:
        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
                                    force_create, info, force_open, excl_stor)
        created_devices.extend(devs)

    if not force_create:
      return created_devices

    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor)
    # The device has been completely created, so there is no point in keeping
    # its subdevices in the list. We just add the device itself instead.
    created_devices = [(node_uuid, device)]
    return created_devices

  except errors.DeviceCreationError, e:
    e.created_devices.extend(created_devices)
    raise e
  except errors.OpExecError, e:
    raise errors.DeviceCreationError(str(e), created_devices)


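# Illustrative note (not part of the original module): the
# DeviceCreationError handling above gives callers a rollback handle. Any
# children created before a failure travel up in e.created_devices, so
# CreateDisks below can hand them to _UndoCreateDisks for removal.

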
def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node_uuid: string
  @param node_uuid: The node UUID
  @rtype: bool
  @return: The effective value of exclusive_storage
  @raise errors.OpPrereqError: if no node exists with the given UUID

  """
  ni = cfg.GetNodeInfo(node_uuid)
  if ni is None:
    raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
                               errors.ECODE_NOENT)
  return IsExclusiveStorageEnabledNode(cfg, ni)


def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
                    force_open):
  """Wrapper around L{_CreateBlockDevInner}.

  This method annotates the root device first.

  """
  (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
  excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
  return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
                              force_open, excl_stor)


def _UndoCreateDisks(lu, disks_created):
  """Undo the work performed by L{CreateDisks}.

  This function is called in case of an error to undo the work of
  L{CreateDisks}.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param disks_created: the result returned by L{CreateDisks}

  """
  for (node_uuid, disk) in disks_created:
    lu.cfg.SetDiskID(disk, node_uuid)
    result = lu.rpc.call_blockdev_remove(node_uuid, disk)
    result.Warn("Failed to remove newly-created disk %s on node %s" %
                (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)


def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node_uuid: string
  @param target_node_uuid: if passed, overrides the target node for creation
  @type disks: list of L{objects.Disk}
  @param disks: the disks to create; if not specified, all the disks of the
      instance are created
  @return: information about the created disks, to be used to call
      L{_UndoCreateDisks}
  @raise errors.OpPrereqError: in case of error

  """
  info = GetInstanceInfoText(instance)
  if target_node_uuid is None:
    pnode_uuid = instance.primary_node
    all_node_uuids = instance.all_nodes
  else:
    pnode_uuid = target_node_uuid
    all_node_uuids = [pnode_uuid]

  if disks is None:
    disks = instance.disks

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir,
                               lu.cfg.GetNodeName(pnode_uuid)))

  disks_created = []
  for idx, device in enumerate(disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
    for node_uuid in all_node_uuids:
      f_create = node_uuid == pnode_uuid
      try:
        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
                        f_create)
        disks_created.append((node_uuid, device))
      except errors.DeviceCreationError, e:
        logging.warning("Creating disk %s for instance '%s' failed",
                        idx, instance.name)
        disks_created.extend(e.created_devices)
        _UndoCreateDisks(lu, disks_created)
        raise errors.OpExecError(e.message)
  return disks_created


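# Illustrative sketch (not part of the original module): callers that do
# more work after CreateDisks typically keep its return value around so a
# later failure can still be rolled back:
#
#   disks_created = CreateDisks(lu, instance)
#   try:
#     pass  # further setup that may raise errors.OpExecError
#   except errors.OpExecError:
#     _UndoCreateDisks(lu, disks_created)
#     raise

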
def ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group.

  """
  def _compute(disks, payload):
    """Universal algorithm.

    """
    vgs = {}
    for disk in disks:
      # Accumulate the requirement per volume group name
      vgs[disk[constants.IDISK_VG]] = \
        vgs.get(disk[constants.IDISK_VG], 0) + \
        disk[constants.IDISK_SIZE] + payload

    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]


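# Illustrative example (not part of the original module; values are
# hypothetical): two 1024 MB disks in volume group "xenvg" under DT_DRBD8,
# which adds constants.DRBD_META_SIZE (128 MB) of metadata per disk:
#
#   disks = [{constants.IDISK_VG: "xenvg", constants.IDISK_SIZE: 1024},
#            {constants.IDISK_VG: "xenvg", constants.IDISK_SIZE: 1024}]
#   ComputeDiskSizePerVG(constants.DT_DRBD8, disks)
#   --> {"xenvg": 2304}

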
def ComputeDisks(op, default_vg):
  """Computes the instance disks.

  @param op: The instance opcode
  @param default_vg: The default_vg to assume

  @return: The computed disks

  """
  disks = []
  for disk in op.disks:
    mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
    if mode not in constants.DISK_ACCESS_SET:
      raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                 mode, errors.ECODE_INVAL)
    size = disk.get(constants.IDISK_SIZE, None)
    if size is None:
      raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
    try:
      size = int(size)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                 errors.ECODE_INVAL)

    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
    if ext_provider and op.disk_template != constants.DT_EXT:
      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
                                 " disk template, not %s" %
                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
                                  op.disk_template), errors.ECODE_INVAL)

    data_vg = disk.get(constants.IDISK_VG, default_vg)
    name = disk.get(constants.IDISK_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    new_disk = {
      constants.IDISK_SIZE: size,
      constants.IDISK_MODE: mode,
      constants.IDISK_VG: data_vg,
      constants.IDISK_NAME: name,
      }

    for key in [
      constants.IDISK_METAVG,
      constants.IDISK_ADOPT,
      constants.IDISK_SPINDLES,
      ]:
      if key in disk:
        new_disk[key] = disk[key]

    # For extstorage, demand the `provider' option and add any
    # additional parameters (ext-params) to the dict
    if op.disk_template == constants.DT_EXT:
      if ext_provider:
        new_disk[constants.IDISK_PROVIDER] = ext_provider
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            new_disk[key] = disk[key]
      else:
        raise errors.OpPrereqError("Missing provider for template '%s'" %
                                   constants.DT_EXT, errors.ECODE_INVAL)

    disks.append(new_disk)

  return disks


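# Illustrative example (not part of the original module; values are
# hypothetical): an opcode disk specification of
#   [{"size": "10240", "vg": "fastvg"}]
# with default_vg "xenvg" is normalized by ComputeDisks above to
#   [{"size": 10240, "mode": "rw", "vg": "fastvg", "name": None}]
# (keys shown by the values of their IDISK_* constants).

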
def CheckRADOSFreeSpace():
  """Compute disk size requirements inside the RADOS cluster.

  """
  # For the RADOS cluster we assume there is always enough space.
  pass


def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())

  dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
                          logical_id=(vgnames[0], names[0]),
                          params={})
  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  dev_meta = objects.Disk(dev_type=constants.LD_LV,
                          size=constants.DRBD_META_SIZE,
                          logical_id=(vgnames[1], names[1]),
                          params={})
  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
                          logical_id=(primary_uuid, secondary_uuid, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name, params={})
  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  return drbd_dev


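# Illustrative note (not part of the original module): _GenerateDRBD8Branch
# above returns a DRBD8 disk whose logical_id packs the endpoints and the
# connection data,
#
#   (primary_uuid, secondary_uuid, port, p_minor, s_minor, shared_secret)
#
# with two LV children: names[0] in vgnames[0] holds the data and names[1]
# in vgnames[1] the fixed-size DRBD metadata.

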
def GenerateDiskTemplate(
  lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
  disk_info, file_storage_dir, file_driver, base_index,
  feedback_fn, full_disk_params):
  """Generate the entire disk layout for a given template type.

  """
  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_node_uuids) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node_uuid = secondary_node_uuids[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disks.append(disk_dev)
  else:
    if secondary_node_uuids:
      raise errors.ProgrammerError("Wrong template configuration")

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    if template_name == constants.DT_PLAIN:

      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])

    elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/disk%d" % (file_storage_dir,
                                                    disk_index))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    elif template_name == constants.DT_EXT:
      def logical_id_fn(idx, _, disk):
        provider = disk.get(constants.IDISK_PROVIDER, None)
        if provider is None:
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
                                       " not found" %
                                       (constants.DT_EXT,
                                        constants.IDISK_PROVIDER))
        return (provider, names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]

    for idx, disk in enumerate(disk_info):
      params = {}
      # Only for the Ext template add disk_info to params
      if template_name == constants.DT_EXT:
        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            params[key] = disk[key]
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
                              logical_id=logical_id_fn(idx, disk_index, disk),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE],
                              params=params,
                              spindles=disk.get(constants.IDISK_SPINDLES))
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
      disks.append(disk_dev)

  return disks


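# Illustrative summary (not part of the original module) of the logical_id
# shapes produced by GenerateDiskTemplate above, for a single disk at
# base_index 0 (VG names and paths are hypothetical placeholders):
#   DT_PLAIN -> ("xenvg", "<unique-id>.disk0")
#   DT_FILE  -> (file_driver, "<file_storage_dir>/disk0")
#   DT_BLOCK -> (constants.BLOCKDEV_DRIVER_MANUAL, "<adopted device>")
#   DT_RBD   -> ("rbd", "<unique-id>.rbd.disk0")
#   DT_EXT   -> ("<provider>", "<unique-id>.ext.disk0")

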
def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
  """Check the presence of the spindle options with exclusive_storage.

  @type diskdict: dict
  @param diskdict: disk parameters
  @type es_flag: bool
  @param es_flag: the effective value of the exclusive_storage flag
  @type required: bool
  @param required: whether spindles are required or just optional
  @raise errors.OpPrereqError: when spindles are given and they should not be

  """
  if (not es_flag and constants.IDISK_SPINDLES in diskdict and
      diskdict[constants.IDISK_SPINDLES] is not None):
    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
                               " when exclusive storage is not active",
                               errors.ECODE_INVAL)
  if (es_flag and required and (constants.IDISK_SPINDLES not in diskdict or
                                diskdict[constants.IDISK_SPINDLES] is None)):
    raise errors.OpPrereqError("You must specify spindles in instance disks"
                               " when exclusive storage is active",
                               errors.ECODE_INVAL)


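# Illustrative decision table (not part of the original module) for
# CheckSpindlesExclusiveStorage above, where "spindles" means a non-None
# IDISK_SPINDLES entry in diskdict:
#   es_flag  required  spindles -> outcome
#   False    any       given    -> OpPrereqError (not allowed)
#   True     True      missing  -> OpPrereqError (must be given)
#   True     False     any      -> accepted
#   False    any       missing  -> accepted

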
class LUInstanceRecreateDisks(LogicalUnit):
548 763ad5be Thomas Thrainer
  """Recreate an instance's missing disks.
549 763ad5be Thomas Thrainer

550 763ad5be Thomas Thrainer
  """
551 763ad5be Thomas Thrainer
  HPATH = "instance-recreate-disks"
552 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
553 763ad5be Thomas Thrainer
  REQ_BGL = False
554 763ad5be Thomas Thrainer
555 763ad5be Thomas Thrainer
  _MODIFYABLE = compat.UniqueFrozenset([
556 763ad5be Thomas Thrainer
    constants.IDISK_SIZE,
557 763ad5be Thomas Thrainer
    constants.IDISK_MODE,
558 c615590c Bernardo Dal Seno
    constants.IDISK_SPINDLES,
559 763ad5be Thomas Thrainer
    ])
560 763ad5be Thomas Thrainer
561 763ad5be Thomas Thrainer
  # New or changed disk parameters may have different semantics
562 763ad5be Thomas Thrainer
  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
563 763ad5be Thomas Thrainer
    constants.IDISK_ADOPT,
564 763ad5be Thomas Thrainer
565 763ad5be Thomas Thrainer
    # TODO: Implement support changing VG while recreating
566 763ad5be Thomas Thrainer
    constants.IDISK_VG,
567 763ad5be Thomas Thrainer
    constants.IDISK_METAVG,
568 763ad5be Thomas Thrainer
    constants.IDISK_PROVIDER,
569 763ad5be Thomas Thrainer
    constants.IDISK_NAME,
570 763ad5be Thomas Thrainer
    ]))
571 763ad5be Thomas Thrainer
572 763ad5be Thomas Thrainer
  def _RunAllocator(self):
573 763ad5be Thomas Thrainer
    """Run the allocator based on input opcode.
574 763ad5be Thomas Thrainer

575 763ad5be Thomas Thrainer
    """
576 763ad5be Thomas Thrainer
    be_full = self.cfg.GetClusterInfo().FillBE(self.instance)
577 763ad5be Thomas Thrainer
578 763ad5be Thomas Thrainer
    # FIXME
579 763ad5be Thomas Thrainer
    # The allocator should actually run in "relocate" mode, but current
580 763ad5be Thomas Thrainer
    # allocators don't support relocating all the nodes of an instance at
581 763ad5be Thomas Thrainer
    # the same time. As a workaround we use "allocate" mode, but this is
582 763ad5be Thomas Thrainer
    # suboptimal for two reasons:
583 763ad5be Thomas Thrainer
    # - The instance name passed to the allocator is present in the list of
584 763ad5be Thomas Thrainer
    #   existing instances, so there could be a conflict within the
585 763ad5be Thomas Thrainer
    #   internal structures of the allocator. This doesn't happen with the
586 763ad5be Thomas Thrainer
    #   current allocators, but it's a liability.
587 763ad5be Thomas Thrainer
    # - The allocator counts the resources used by the instance twice: once
588 763ad5be Thomas Thrainer
    #   because the instance exists already, and once because it tries to
589 763ad5be Thomas Thrainer
    #   allocate a new instance.
590 763ad5be Thomas Thrainer
    # The allocator could choose some of the nodes on which the instance is
591 763ad5be Thomas Thrainer
    # running, but that's not a problem. If the instance nodes are broken,
592 763ad5be Thomas Thrainer
    # they should be already be marked as drained or offline, and hence
593 763ad5be Thomas Thrainer
    # skipped by the allocator. If instance disks have been lost for other
594 763ad5be Thomas Thrainer
    # reasons, then recreating the disks on the same nodes should be fine.
595 763ad5be Thomas Thrainer
    disk_template = self.instance.disk_template
596 763ad5be Thomas Thrainer
    spindle_use = be_full[constants.BE_SPINDLE_USE]
597 0e514de1 Bernardo Dal Seno
    disks = [{
598 0e514de1 Bernardo Dal Seno
      constants.IDISK_SIZE: d.size,
599 0e514de1 Bernardo Dal Seno
      constants.IDISK_MODE: d.mode,
600 0e514de1 Bernardo Dal Seno
      constants.IDISK_SPINDLES: d.spindles,
601 0e514de1 Bernardo Dal Seno
      } for d in self.instance.disks]
602 763ad5be Thomas Thrainer
    req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
603 763ad5be Thomas Thrainer
                                        disk_template=disk_template,
604 763ad5be Thomas Thrainer
                                        tags=list(self.instance.GetTags()),
605 763ad5be Thomas Thrainer
                                        os=self.instance.os,
606 763ad5be Thomas Thrainer
                                        nics=[{}],
607 763ad5be Thomas Thrainer
                                        vcpus=be_full[constants.BE_VCPUS],
608 763ad5be Thomas Thrainer
                                        memory=be_full[constants.BE_MAXMEM],
609 763ad5be Thomas Thrainer
                                        spindle_use=spindle_use,
610 0e514de1 Bernardo Dal Seno
                                        disks=disks,
611 763ad5be Thomas Thrainer
                                        hypervisor=self.instance.hypervisor,
612 763ad5be Thomas Thrainer
                                        node_whitelist=None)
613 763ad5be Thomas Thrainer
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
614 763ad5be Thomas Thrainer
615 763ad5be Thomas Thrainer
    ial.Run(self.op.iallocator)
616 763ad5be Thomas Thrainer
617 763ad5be Thomas Thrainer
    assert req.RequiredNodes() == len(self.instance.all_nodes)
618 763ad5be Thomas Thrainer
619 763ad5be Thomas Thrainer
    if not ial.success:
620 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
621 763ad5be Thomas Thrainer
                                 " %s" % (self.op.iallocator, ial.info),
622 763ad5be Thomas Thrainer
                                 errors.ECODE_NORES)
623 763ad5be Thomas Thrainer
624 1c3231aa Thomas Thrainer
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
625 763ad5be Thomas Thrainer
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
626 763ad5be Thomas Thrainer
                 self.op.instance_name, self.op.iallocator,
627 1c3231aa Thomas Thrainer
                 utils.CommaJoin(self.op.nodes))
628 763ad5be Thomas Thrainer
629 763ad5be Thomas Thrainer
  def CheckArguments(self):
630 763ad5be Thomas Thrainer
    if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
631 763ad5be Thomas Thrainer
      # Normalize and convert deprecated list of disk indices
632 763ad5be Thomas Thrainer
      self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
633 763ad5be Thomas Thrainer
634 763ad5be Thomas Thrainer
    duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
635 763ad5be Thomas Thrainer
    if duplicates:
636 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Some disks have been specified more than"
637 763ad5be Thomas Thrainer
                                 " once: %s" % utils.CommaJoin(duplicates),
638 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
639 763ad5be Thomas Thrainer
640 763ad5be Thomas Thrainer
    # We don't want _CheckIAllocatorOrNode selecting the default iallocator
641 763ad5be Thomas Thrainer
    # when neither iallocator nor nodes are specified
642 763ad5be Thomas Thrainer
    if self.op.iallocator or self.op.nodes:
643 5eacbcae Thomas Thrainer
      CheckIAllocatorOrNode(self, "iallocator", "nodes")
644 763ad5be Thomas Thrainer
645 763ad5be Thomas Thrainer
    for (idx, params) in self.op.disks:
646 763ad5be Thomas Thrainer
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
647 763ad5be Thomas Thrainer
      unsupported = frozenset(params.keys()) - self._MODIFYABLE
648 763ad5be Thomas Thrainer
      if unsupported:
649 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Parameters for disk %s try to change"
650 763ad5be Thomas Thrainer
                                   " unmodifyable parameter(s): %s" %
651 763ad5be Thomas Thrainer
                                   (idx, utils.CommaJoin(unsupported)),
652 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
653 763ad5be Thomas Thrainer
654 763ad5be Thomas Thrainer
  def ExpandNames(self):
655 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
656 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
657 763ad5be Thomas Thrainer
658 763ad5be Thomas Thrainer
    if self.op.nodes:
659 1c3231aa Thomas Thrainer
      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
660 1c3231aa Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
661 763ad5be Thomas Thrainer
    else:
662 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = []
663 763ad5be Thomas Thrainer
      if self.op.iallocator:
664 763ad5be Thomas Thrainer
        # iallocator will select a new node in the same group
665 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
666 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
667 763ad5be Thomas Thrainer
668 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
669 763ad5be Thomas Thrainer
670 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
671 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODEGROUP:
672 763ad5be Thomas Thrainer
      assert self.op.iallocator is not None
673 763ad5be Thomas Thrainer
      assert not self.op.nodes
674 763ad5be Thomas Thrainer
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
675 763ad5be Thomas Thrainer
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
676 763ad5be Thomas Thrainer
      # Lock the primary group used by the instance optimistically; this
677 763ad5be Thomas Thrainer
      # requires going via the node before it's locked, requiring
678 763ad5be Thomas Thrainer
      # verification later on
679 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
680 da4a52a3 Thomas Thrainer
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)
681 763ad5be Thomas Thrainer
682 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE:
683 763ad5be Thomas Thrainer
      # If an allocator is used, then we lock all the nodes in the current
684 763ad5be Thomas Thrainer
      # instance group, as we don't know yet which ones will be selected;
685 763ad5be Thomas Thrainer
      # if we replace the nodes without using an allocator, locks are
686 763ad5be Thomas Thrainer
      # already declared in ExpandNames; otherwise, we need to lock all the
687 763ad5be Thomas Thrainer
      # instance nodes for disk re-creation
688 763ad5be Thomas Thrainer
      if self.op.iallocator:
689 763ad5be Thomas Thrainer
        assert not self.op.nodes
690 763ad5be Thomas Thrainer
        assert not self.needed_locks[locking.LEVEL_NODE]
691 763ad5be Thomas Thrainer
        assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1
692 763ad5be Thomas Thrainer
693 763ad5be Thomas Thrainer
        # Lock member nodes of the group of the primary node
694 763ad5be Thomas Thrainer
        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
695 763ad5be Thomas Thrainer
          self.needed_locks[locking.LEVEL_NODE].extend(
696 763ad5be Thomas Thrainer
            self.cfg.GetNodeGroup(group_uuid).members)
697 763ad5be Thomas Thrainer
698 763ad5be Thomas Thrainer
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
699 763ad5be Thomas Thrainer
      elif not self.op.nodes:
700 763ad5be Thomas Thrainer
        self._LockInstancesNodes(primary_only=False)
701 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
702 763ad5be Thomas Thrainer
      # Copy node locks
703 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
704 5eacbcae Thomas Thrainer
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
705 763ad5be Thomas Thrainer
706 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
707 763ad5be Thomas Thrainer
    """Build hooks env.
708 763ad5be Thomas Thrainer

709 763ad5be Thomas Thrainer
    This runs on master, primary and secondary nodes of the instance.
710 763ad5be Thomas Thrainer

711 763ad5be Thomas Thrainer
    """
712 5eacbcae Thomas Thrainer
    return BuildInstanceHookEnvByObject(self, self.instance)
713 763ad5be Thomas Thrainer
714 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
715 763ad5be Thomas Thrainer
    """Build hooks nodes.
716 763ad5be Thomas Thrainer

717 763ad5be Thomas Thrainer
    """
718 763ad5be Thomas Thrainer
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
719 763ad5be Thomas Thrainer
    return (nl, nl)
720 763ad5be Thomas Thrainer
721 763ad5be Thomas Thrainer
  def CheckPrereq(self):
722 763ad5be Thomas Thrainer
    """Check prerequisites.
723 763ad5be Thomas Thrainer

724 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster and is not running.
725 763ad5be Thomas Thrainer

726 763ad5be Thomas Thrainer
    """
727 da4a52a3 Thomas Thrainer
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
728 763ad5be Thomas Thrainer
    assert instance is not None, \
729 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
730 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
731 1c3231aa Thomas Thrainer
      if len(self.op.node_uuids) != len(instance.all_nodes):
732 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
733 763ad5be Thomas Thrainer
                                   " %d replacement nodes were specified" %
734 763ad5be Thomas Thrainer
                                   (instance.name, len(instance.all_nodes),
735 1c3231aa Thomas Thrainer
                                    len(self.op.node_uuids)),
736 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
737 763ad5be Thomas Thrainer
      assert instance.disk_template != constants.DT_DRBD8 or \
738 1c3231aa Thomas Thrainer
             len(self.op.node_uuids) == 2
739 763ad5be Thomas Thrainer
      assert instance.disk_template != constants.DT_PLAIN or \
740 1c3231aa Thomas Thrainer
             len(self.op.node_uuids) == 1
741 1c3231aa Thomas Thrainer
      primary_node = self.op.node_uuids[0]
742 763ad5be Thomas Thrainer
    else:
743 763ad5be Thomas Thrainer
      primary_node = instance.primary_node
744 763ad5be Thomas Thrainer
    if not self.op.iallocator:
745 5eacbcae Thomas Thrainer
      CheckNodeOnline(self, primary_node)
746 763ad5be Thomas Thrainer
747 763ad5be Thomas Thrainer
    if instance.disk_template == constants.DT_DISKLESS:
748 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Instance '%s' has no disks" %
749 763ad5be Thomas Thrainer
                                 self.op.instance_name, errors.ECODE_INVAL)
750 763ad5be Thomas Thrainer
751 763ad5be Thomas Thrainer
    # Verify if node group locks are still correct
752 763ad5be Thomas Thrainer
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
753 763ad5be Thomas Thrainer
    if owned_groups:
754 763ad5be Thomas Thrainer
      # Node group locks are acquired only for the primary node (and only
755 763ad5be Thomas Thrainer
      # when the allocator is used)
756 da4a52a3 Thomas Thrainer
      CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
757 5eacbcae Thomas Thrainer
                              primary_only=True)
758 763ad5be Thomas Thrainer
759 763ad5be Thomas Thrainer
    # if we replace nodes *and* the old primary is offline, we don't
760 763ad5be Thomas Thrainer
    # check the instance state
761 763ad5be Thomas Thrainer
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
762 1c3231aa Thomas Thrainer
    if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
763 5eacbcae Thomas Thrainer
      CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
764 5eacbcae Thomas Thrainer
                         msg="cannot recreate disks")
765 763ad5be Thomas Thrainer
766 763ad5be Thomas Thrainer
    if self.op.disks:
767 763ad5be Thomas Thrainer
      self.disks = dict(self.op.disks)
768 763ad5be Thomas Thrainer
    else:
769 763ad5be Thomas Thrainer
      self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
770 763ad5be Thomas Thrainer
771 763ad5be Thomas Thrainer
    maxidx = max(self.disks.keys())
772 763ad5be Thomas Thrainer
    if maxidx >= len(instance.disks):
773 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
774 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
775 763ad5be Thomas Thrainer
776 1c3231aa Thomas Thrainer
    if ((self.op.node_uuids or self.op.iallocator) and
777 763ad5be Thomas Thrainer
         sorted(self.disks.keys()) != range(len(instance.disks))):
778 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't recreate disks partially and"
779 763ad5be Thomas Thrainer
                                 " change the nodes at the same time",
780 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)

    self.instance = instance

    if self.op.iallocator:
      self._RunAllocator()
      # Release unneeded node and node resource locks
      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    if self.op.node_uuids:
      node_uuids = self.op.node_uuids
    else:
      node_uuids = instance.all_nodes
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
      )
    for new_params in self.disks.values():
      CheckSpindlesExclusiveStorage(new_params, excl_stor, False)

  def Exec(self, feedback_fn):
    """Recreate the disks.

    """
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    to_skip = []
    mods = [] # keeps track of needed changes

    for idx, disk in enumerate(self.instance.disks):
      try:
        changes = self.disks[idx]
      except KeyError:
        # Disk should not be recreated
        to_skip.append(idx)
        continue

      # update secondaries for disks, if needed
      if self.op.node_uuids and disk.dev_type == constants.LD_DRBD8:
        # need to update the nodes and minors
        assert len(self.op.node_uuids) == 2
        assert len(disk.logical_id) == 6 # otherwise disk internals
                                         # have changed
        (_, _, old_port, _, _, old_secret) = disk.logical_id
        new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
                                                self.instance.uuid)
        new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
                  new_minors[0], new_minors[1], old_secret)
        assert len(disk.logical_id) == len(new_id)
      else:
        new_id = None

      mods.append((idx, new_id, changes))

    # now that we have passed all asserts above, we can apply the mods
    # in a single run (to avoid partial changes)
    for idx, new_id, changes in mods:
      disk = self.instance.disks[idx]
      if new_id is not None:
        assert disk.dev_type == constants.LD_DRBD8
        disk.logical_id = new_id
      if changes:
        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
                    mode=changes.get(constants.IDISK_MODE, None),
                    spindles=changes.get(constants.IDISK_SPINDLES, None))

    # change primary node, if needed
    if self.op.node_uuids:
      self.instance.primary_node = self.op.node_uuids[0]
      self.LogWarning("Changing the instance's nodes, you will have to"
                      " remove any disks left on the older nodes manually")

    if self.op.node_uuids:
      self.cfg.Update(self.instance, feedback_fn)

    # All touched nodes must be locked
    mylocks = self.owned_locks(locking.LEVEL_NODE)
    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
    new_disks = CreateDisks(self, self.instance, to_skip=to_skip)

    # TODO: Release node locks before wiping, or explain why it's not possible
    if self.cfg.GetClusterInfo().prealloc_wipe_disks:
      wipedisks = [(idx, disk, 0)
                   for (idx, disk) in enumerate(self.instance.disks)
                   if idx not in to_skip]
      WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
                         cleanup=new_disks)
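
# Note on the DRBD8 logical_id rebuilt above: the six-element tuple is laid
# out as (node_A_uuid, node_B_uuid, port, minor_A, minor_B, secret), so
# recreating the disks on new nodes only swaps in the new node UUIDs and
# freshly allocated minors while the old TCP port and shared secret are
# kept, e.g. (hypothetical values):
#   ("uuid-a", "uuid-b", 11000, 0, 1, "secret")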


def _PerformNodeInfoCall(lu, node_uuids, vg):
  """Prepares the input and performs a node info call.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: list of string
  @param node_uuids: list of node UUIDs to perform the call for
  @type vg: string
  @param vg: the volume group's name

  """
  lvm_storage_units = [(constants.ST_LVM_VG, vg)]
  storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
                                                  node_uuids)
  hvname = lu.cfg.GetHypervisorType()
  hvparams = lu.cfg.GetClusterInfo().hvparams
  nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
                                   [(hvname, hvparams[hvname])])
  return nodeinfo


def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
  """Checks the vg capacity for a given node.

  @type node_name: string
  @param node_name: the name of the node
  @type node_info: tuple (_, list of dicts, _)
  @param node_info: the result of the node info call for one node
  @type vg: string
  @param vg: volume group name
  @type requested: int
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  (_, space_info, _) = node_info
  lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
      space_info, constants.ST_LVM_VG)
  if not lvm_vg_info:
    raise errors.OpPrereqError("Can't retrieve storage information for LVM")
  vg_free = lvm_vg_info.get("storage_free", None)
  if not isinstance(vg_free, int):
    raise errors.OpPrereqError("Can't compute free disk space on node"
                               " %s for vg %s, result was '%s'" %
                               (node_name, vg, vg_free), errors.ECODE_ENVIRON)
  if requested > vg_free:
    raise errors.OpPrereqError("Not enough disk space on target node %s"
                               " vg %s: required %d MiB, available %d MiB" %
                               (node_name, vg, requested, vg_free),
                               errors.ECODE_NORES)
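
# Shape of the data consumed above ("storage_free" is the only field read
# here; the other field names and all values are illustrative): node_info is
# the three-element node info payload whose middle element lists one dict per
# storage unit, e.g.
#   (bootid, [{"type": constants.ST_LVM_VG, "name": "xenvg",
#              "storage_free": 51200, "storage_size": 102400}], hv_info)
# so _CheckVgCapacityForNode("node1.example.com", node_info, "xenvg", 10240)
# would pass, since 10240 MiB <= the 51200 MiB reported free.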


def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
  """Checks if nodes have enough free disk space in the specified VG.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type vg: C{str}
  @param vg: the volume group to check
  @type requested: C{int}
  @param requested: the amount of disk in MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
  for node in node_uuids:
    node_name = lu.cfg.GetNodeName(node)
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    _CheckVgCapacityForNode(node_name, info.payload, vg, requested)


def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
  """Checks if nodes have enough free disk space in all the VGs.

  This function checks if all given nodes have the needed amount of
  free disk. In case any node has less disk or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type req_sizes: C{dict}
  @param req_sizes: the hash of vg and corresponding amount of disk in
      MiB to check for
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
      or we cannot check the node

  """
  for vg, req_size in req_sizes.items():
    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
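
# Example (hypothetical volume group names): req_sizes maps each VG to the
# amount of MiB that must be free on every node, so
#   CheckNodesFreeDiskPerVG(lu, node_uuids, {"xenvg": 10240, "datavg": 2048})
# checks for 10 GiB free in "xenvg" and 2 GiB free in "datavg" on all of the
# given nodes.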


def _DiskSizeInBytesToMebibytes(lu, size):
  """Converts a disk size in bytes to mebibytes.

  Warns and rounds up if the size isn't an even multiple of 1 MiB.

  """
  (mib, remainder) = divmod(size, 1024 * 1024)

  if remainder != 0:
    lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
                  " to not overwrite existing data (%s bytes will not be"
                  " wiped)", (1024 * 1024) - remainder)
    mib += 1

  return mib
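
# Worked example: for size = 5 * 1024 * 1024 + 1 bytes, divmod() yields
# (5, 1), so the function warns that 1048575 bytes will not be wiped and
# returns 6; an exact multiple such as 5 * 1024 * 1024 returns 5 silently.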


def _CalcEta(time_taken, written, total_size):
  """Calculates the ETA based on size written and total size.

  @param time_taken: The time taken so far
  @param written: amount written so far
  @param total_size: The total size of data to be written
  @return: The remaining time in seconds

  """
  avg_time = time_taken / float(written)
  return (total_size - written) * avg_time
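
# Worked example: after writing 256 MiB of a 1024 MiB disk in 30 seconds,
# _CalcEta(30.0, 256, 1024) averages 30 / 256.0 seconds per MiB and returns
# (1024 - 256) * (30 / 256.0) = 90.0 seconds remaining.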


def WipeDisks(lu, instance, disks=None):
  """Wipes instance disks.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
  @param disks: Disk details; tuple contains disk index, disk object and the
    start offset

  """
  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  if disks is None:
    disks = [(idx, disk, 0)
             for (idx, disk) in enumerate(instance.disks)]

  for (_, device, _) in disks:
    lu.cfg.SetDiskID(device, node_uuid)

  logging.info("Pausing synchronization of disks of instance '%s'",
               instance.name)
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                  (map(compat.snd, disks),
                                                   instance),
                                                  True)
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)

  for idx, success in enumerate(result.payload):
    if not success:
      logging.warn("Pausing synchronization of disk %s of instance '%s'"
                   " failed", idx, instance.name)

  try:
    for (idx, device, offset) in disks:
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
      # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
      wipe_chunk_size = \
        int(min(constants.MAX_WIPE_CHUNK,
                device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))

      size = device.size
      last_output = 0
      start_time = time.time()

      if offset == 0:
        info_text = ""
      else:
        info_text = (" (from %s to %s)" %
                     (utils.FormatUnit(offset, "h"),
                      utils.FormatUnit(size, "h")))

      lu.LogInfo("* Wiping disk %s%s", idx, info_text)

      logging.info("Wiping disk %d for instance %s on node %s using"
                   " chunk size %s", idx, instance.name, node_name,
                   wipe_chunk_size)

      while offset < size:
        wipe_size = min(wipe_chunk_size, size - offset)

        logging.debug("Wiping disk %d, offset %s, chunk %s",
                      idx, offset, wipe_size)

        result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
                                           offset, wipe_size)
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
                     (idx, offset, wipe_size))

        now = time.time()
        offset += wipe_size
        if now - last_output >= 60:
          eta = _CalcEta(now - start_time, offset, size)
          lu.LogInfo(" - done: %.1f%% ETA: %s",
                     offset / float(size) * 100, utils.FormatSeconds(eta))
          last_output = now
  finally:
    logging.info("Resuming synchronization of disks for instance '%s'",
                 instance.name)

    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
                                                    (map(compat.snd, disks),
                                                     instance),
                                                    False)

    if result.fail_msg:
      lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
                    node_name, result.fail_msg)
    else:
      for idx, success in enumerate(result.payload):
        if not success:
          lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
                        " failed", idx, instance.name)
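
# Worked example for the chunk-size formula in WipeDisks (assuming the usual
# values constants.MIN_WIPE_CHUNK_PERCENT = 10 and constants.MAX_WIPE_CHUNK =
# 1024 MiB): a 2048 MiB disk is wiped in chunks of int(min(1024, 204.8)) =
# 204 MiB, while any disk of 10 GiB or more is capped at 1024 MiB chunks.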


def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
  """Wrapper for L{WipeDisks} that handles errors.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should wipe
  @param disks: see L{WipeDisks}
  @param cleanup: the result returned by L{CreateDisks}, used for cleanup in
      case of error
  @raise errors.OpPrereqError: in case of failure

  """
  try:
    WipeDisks(lu, instance, disks=disks)
  except errors.OpExecError:
    logging.warning("Wiping disks for instance '%s' failed",
                    instance.name)
    _UndoCreateDisks(lu, cleanup)
    raise


def ExpandCheckDisks(instance, disks):
  """Return the instance disks selected by the disks list.

  @type disks: list of L{objects.Disk} or None
  @param disks: selected disks
  @rtype: list of L{objects.Disk}
  @return: selected instance disks to act on

  """
  if disks is None:
    return instance.disks
  else:
    if not set(disks).issubset(instance.disks):
      raise errors.ProgrammerError("Can only act on disks belonging to the"
                                   " target instance: expected a subset of %r,"
                                   " got %r" % (instance.disks, disks))
    return disks
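
# Usage sketch for ExpandCheckDisks: passing None selects every disk of the
# instance, e.g.
#   disks = ExpandCheckDisks(instance, None)                  # all disks
#   disks = ExpandCheckDisks(instance, [instance.disks[0]])   # first disk
# while a list that is not a subset of instance.disks is treated as a
# programmer error rather than a user error.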


def WaitForSync(lu, instance, disks=None, oneshot=False):
  """Sleep and poll for an instance's disks to sync.

  """
  if not instance.disks or (disks is not None and not disks):
    return True

  disks = ExpandCheckDisks(instance, disks)

  if not oneshot:
    lu.LogInfo("Waiting for instance %s to sync disks", instance.name)

  node_uuid = instance.primary_node
  node_name = lu.cfg.GetNodeName(node_uuid)

  for dev in disks:
    lu.cfg.SetDiskID(dev, node_uuid)

  # TODO: Convert to utils.Retry

  retries = 0
  degr_retries = 10 # in seconds, as we sleep 1 second each time
  while True:
    max_time = 0
    done = True
    cumul_degraded = False
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
    msg = rstats.fail_msg
    if msg:
      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
      retries += 1
      if retries >= 10:
        raise errors.RemoteError("Can't contact node %s for mirror data,"
                                 " aborting." % node_name)
      time.sleep(6)
      continue
    rstats = rstats.payload
    retries = 0
    for i, mstat in enumerate(rstats):
      if mstat is None:
        lu.LogWarning("Can't compute data for node %s/%s",
                      node_name, disks[i].iv_name)
        continue

      cumul_degraded = (cumul_degraded or
                        (mstat.is_degraded and mstat.sync_percent is None))
      if mstat.sync_percent is not None:
        done = False
        if mstat.estimated_time is not None:
          rem_time = ("%s remaining (estimated)" %
                      utils.FormatSeconds(mstat.estimated_time))
          max_time = mstat.estimated_time
        else:
          rem_time = "no time estimate"
        lu.LogInfo("- device %s: %5.2f%% done, %s",
                   disks[i].iv_name, mstat.sync_percent, rem_time)

    # if we're done but degraded, let's do a few small retries, to
    # make sure we see a stable and not transient situation; therefore
    # we force restart of the loop
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
      logging.info("Degraded disks found, %d retries left", degr_retries)
      degr_retries -= 1
      time.sleep(1)
      continue

    if done or oneshot:
      break

    time.sleep(min(60, max_time))

  if done:
    lu.LogInfo("Instance %s's disks are in sync", instance.name)

  return not cumul_degraded


def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
  """Shutdown block devices of an instance.

  This does the shutdown on all nodes of the instance.

  If ignore_primary is false, errors on the primary node make the
  function return False; if it is true, they are ignored. Errors on
  offline secondary nodes are always ignored.

  """
  lu.cfg.MarkInstanceDisksInactive(instance.uuid)
  all_result = True
  disks = ExpandCheckDisks(instance, disks)

  for disk in disks:
    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
      lu.cfg.SetDiskID(top_disk, node_uuid)
      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        if ((node_uuid == instance.primary_node and not ignore_primary) or
            (node_uuid != instance.primary_node and not result.offline)):
          all_result = False
  return all_result


def _SafeShutdownInstanceDisks(lu, instance, disks=None):
  """Shutdown block devices of an instance.

  This function checks that the instance is not running before calling
  L{ShutdownInstanceDisks}.

  """
  CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
  ShutdownInstanceDisks(lu, instance, disks=disks)


def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
                          ignore_size=False, check=True):
  """Prepare the block devices for an instance.

  This sets up the block devices on all nodes.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for whose disks we assemble
  @type disks: list of L{objects.Disk} or None
  @param disks: which disks to assemble (or all, if None)
  @type ignore_secondaries: boolean
  @param ignore_secondaries: if true, errors on secondary nodes
      won't result in an error return from the function
  @type ignore_size: boolean
  @param ignore_size: if true, the current known size of the disk
      will not be used during the disk activation, useful for cases
      when the size is wrong
  @type check: boolean
  @param check: if true, expand and verify the disk list via
      L{ExpandCheckDisks}
  @return: a tuple of (disks_ok, device_info); disks_ok is False if the
      operation failed, and device_info is a list of
      (host, instance_visible_name, node_visible_name)
      with the mapping from node devices to instance devices

  """
  device_info = []
  disks_ok = True
  if check:
    disks = ExpandCheckDisks(instance, disks)

  # With the two passes mechanism we try to reduce the window of
  # opportunity for the race condition of switching DRBD to primary
  # before handshaking occurred, but we do not eliminate it

  # The proper fix would be to wait (with some limits) until the
  # connection has been made and drbd transitions from WFConnection
  # into any other network-connected state (Connected, SyncTarget,
  # SyncSource, etc.)

  # mark instance disks as active before doing actual work, so watcher does
  # not try to shut them down erroneously
  lu.cfg.MarkInstanceDisksActive(instance.uuid)

  # 1st pass, assemble on all nodes in secondary mode
  for idx, inst_disk in enumerate(disks):
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
                                  instance.primary_node):
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node_uuid)
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                             instance.name, False, idx)
      msg = result.fail_msg
      if msg:
        is_offline_secondary = (node_uuid in instance.secondary_nodes and
                                result.offline)
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=False, pass=1): %s",
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        if not (ignore_secondaries or is_offline_secondary):
          disks_ok = False

  # FIXME: race condition on drbd migration to primary

  # 2nd pass, do only the primary node
  for idx, inst_disk in enumerate(disks):
    dev_path = None

    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
                                  instance.primary_node):
      if node_uuid != instance.primary_node:
        continue
      if ignore_size:
        node_disk = node_disk.Copy()
        node_disk.UnsetSize()
      lu.cfg.SetDiskID(node_disk, node_uuid)
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
                                             instance.name, True, idx)
      msg = result.fail_msg
      if msg:
        lu.LogWarning("Could not prepare block device %s on node %s"
                      " (is_primary=True, pass=2): %s",
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
        disks_ok = False
      else:
        dev_path = result.payload

    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
                        inst_disk.iv_name, dev_path))

  # leave the disks configured for the primary node
  # this is a workaround that would be fixed better by
  # improving the logical/physical id handling
  for disk in disks:
    lu.cfg.SetDiskID(disk, instance.primary_node)

  if not disks_ok:
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)

  return disks_ok, device_info
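
# Usage sketch (mirroring StartInstanceDisks below): callers unpack the pair
# and treat a false disks_ok as fatal, e.g.
#   disks_ok, device_info = AssembleInstanceDisks(lu, instance)
#   if not disks_ok:
#     ShutdownInstanceDisks(lu, instance)
# where each device_info entry is (primary node name, disk iv_name, device
# path as returned by the primary-node assemble call).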


def StartInstanceDisks(lu, instance, force):
  """Start the disks of an instance.

  """
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
                                      ignore_secondaries=force)
  if not disks_ok:
    ShutdownInstanceDisks(lu, instance)
    if force is not None and not force:
      lu.LogWarning("",
                    hint=("If the message above refers to a secondary node,"
                          " you can retry the operation using '--force'"))
    raise errors.OpExecError("Disk consistency error")


class LUInstanceGrowDisk(LogicalUnit):
  """Grow a disk of an instance.

  """
  HPATH = "disk-grow"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    env = {
      "DISK": self.op.disk,
      "AMOUNT": self.op.amount,
      "ABSOLUTE": self.op.absolute,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    node_uuids = list(self.instance.all_nodes)
    for node_uuid in node_uuids:
      CheckNodeOnline(self, node_uuid)
    self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)

    if self.instance.disk_template not in constants.DTS_GROWABLE:
      raise errors.OpPrereqError("Instance's disk layout does not support"
                                 " growing", errors.ECODE_INVAL)

    self.disk = self.instance.FindDisk(self.op.disk)

    if self.op.absolute:
      self.target = self.op.amount
      self.delta = self.target - self.disk.size
      if self.delta < 0:
        raise errors.OpPrereqError("Requested size (%s) is smaller than "
                                   "current disk size (%s)" %
                                   (utils.FormatUnit(self.target, "h"),
                                    utils.FormatUnit(self.disk.size, "h")),
                                   errors.ECODE_STATE)
    else:
      self.delta = self.op.amount
      self.target = self.disk.size + self.delta
      if self.delta < 0:
        raise errors.OpPrereqError("Requested increment (%s) is negative" %
                                   utils.FormatUnit(self.delta, "h"),
                                   errors.ECODE_INVAL)

    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))
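
  # Worked example for the absolute/relative modes above (hypothetical
  # sizes): growing a 2048 MiB disk with amount=1024 and absolute=False
  # yields delta=1024 and target=3072, while amount=1024 with absolute=True
  # yields target=1024 and delta=-1024, which is rejected as smaller than
  # the current size.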

  def _CheckDiskSpace(self, node_uuids, req_vgspace):
    template = self.instance.disk_template
    if (template not in constants.DTS_NO_FREE_SPACE_CHECK and
        not any(self.node_es_flags.values())):
      # TODO: check the free disk space for file, when that feature will be
      # supported
      # With exclusive storage we need to do something smarter than just
      # looking at free space, which, in the end, is basically a dry run. So
      # we rely on the dry run performed in Exec() instead.
      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)

  def Exec(self, feedback_fn):
    """Execute disk grow.

    """
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks

    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
    if not disks_ok:
      raise errors.OpExecError("Cannot activate block device to grow")

    feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
                (self.op.disk, self.instance.name,
                 utils.FormatUnit(self.delta, "h"),
                 utils.FormatUnit(self.target, "h")))

    # First run all grow ops in dry-run mode
    for node_uuid in self.instance.all_nodes:
      self.cfg.SetDiskID(self.disk, node_uuid)
      result = self.rpc.call_blockdev_grow(node_uuid,
                                           (self.disk, self.instance),
                                           self.delta, True, True,
                                           self.node_es_flags[node_uuid])
      result.Raise("Dry-run grow request failed to node %s" %
                   self.cfg.GetNodeName(node_uuid))

    if wipe_disks:
      # Get disk size from primary node for wiping
      self.cfg.SetDiskID(self.disk, self.instance.primary_node)
      result = self.rpc.call_blockdev_getdimensions(self.instance.primary_node,
                                                    [self.disk])
      result.Raise("Failed to retrieve disk size from node '%s'" %
                   self.instance.primary_node)

      (disk_dimensions, ) = result.payload

      if disk_dimensions is None:
        raise errors.OpExecError("Failed to retrieve disk size from primary"
                                 " node '%s'" % self.instance.primary_node)
      (disk_size_in_bytes, _) = disk_dimensions

      old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)

      assert old_disk_size >= self.disk.size, \
        ("Retrieved disk size too small (got %s, should be at least %s)" %
         (old_disk_size, self.disk.size))
    else:
      old_disk_size = None

    # We know that (as far as we can test) operations across different
    # nodes will succeed, time to run it for real on the backing storage
    for node_uuid in self.instance.all_nodes:
      self.cfg.SetDiskID(self.disk, node_uuid)
      result = self.rpc.call_blockdev_grow(node_uuid,
                                           (self.disk, self.instance),
                                           self.delta, False, True,
                                           self.node_es_flags[node_uuid])
      result.Raise("Grow request failed to node %s" %
                   self.cfg.GetNodeName(node_uuid))

    # And now execute it for logical storage, on the primary node
    node_uuid = self.instance.primary_node
    self.cfg.SetDiskID(self.disk, node_uuid)
    result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
                                         self.delta, False, False,
                                         self.node_es_flags[node_uuid])
    result.Raise("Grow request failed to node %s" %
                 self.cfg.GetNodeName(node_uuid))

    self.disk.RecordGrow(self.delta)
    self.cfg.Update(self.instance, feedback_fn)

    # Changes have been recorded, release node lock
    ReleaseLocks(self, locking.LEVEL_NODE)

    # Downgrade lock while waiting for sync
    self.glm.downgrade(locking.LEVEL_INSTANCE)

    assert wipe_disks ^ (old_disk_size is None)

    if wipe_disks:
      assert self.instance.disks[self.op.disk] == self.disk

      # Wipe newly added disk space
      WipeDisks(self, self.instance,
                disks=[(self.op.disk, self.disk, old_disk_size)])

    if self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
      if disk_abort:
        self.LogWarning("Disk syncing has not returned a good status; check"
                        " the instance")
      if not self.instance.disks_active:
        _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
    elif not self.instance.disks_active:
      self.LogWarning("Not shutting down the disk even if the instance is"
                      " not supposed to be running because no wait for"
                      " sync mode was requested")

    assert self.owned_locks(locking.LEVEL_NODE_RES)
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
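
# Note on the grow sequence in LUInstanceGrowDisk.Exec above: the blockdev
# grow RPC is issued three times on purpose - first as a per-node dry run,
# then per node for real on the backing storage, and finally on the primary
# node only for the logical layer (for DRBD, the device stacked on top of
# the backing volumes) - so most failures are caught before any node has
# been modified.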


class LUInstanceReplaceDisks(LogicalUnit):
  """Replace the disks of an instance.

  """
  HPATH = "mirrors-replace"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.mode == constants.REPLACE_DISK_CHG:
      if self.op.remote_node is None and self.op.iallocator is None:
        raise errors.OpPrereqError("When changing the secondary either an"
                                   " iallocator script must be used or the"
                                   " new node given", errors.ECODE_INVAL)
      else:
        CheckIAllocatorOrNode(self, "iallocator", "remote_node")

    elif self.op.remote_node is not None or self.op.iallocator is not None:
      # Not replacing the secondary
      raise errors.OpPrereqError("The iallocator and new node options can"
                                 " only be used when changing the"
                                 " secondary node", errors.ECODE_INVAL)

  def ExpandNames(self):
    self._ExpandAndLockInstance()

    assert locking.LEVEL_NODE not in self.needed_locks
    assert locking.LEVEL_NODE_RES not in self.needed_locks
    assert locking.LEVEL_NODEGROUP not in self.needed_locks

    assert self.op.iallocator is None or self.op.remote_node is None, \
      "Conflicting options"

    if self.op.remote_node is not None:
      (self.op.remote_node_uuid, self.op.remote_node) = \
        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                              self.op.remote_node)

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8Dev.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      self.needed_locks[locking.LEVEL_NODE] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

      if self.op.iallocator is not None:
        # iallocator will select a new node in the same group
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

    self.needed_locks[locking.LEVEL_NODE_RES] = []

    self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
                                   self.op.instance_name, self.op.mode,
                                   self.op.iallocator, self.op.remote_node_uuid,
                                   self.op.disks, self.op.early_release,
                                   self.op.ignore_ipolicy)

    self.tasklets = [self.replacer]

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert self.op.remote_node_uuid is None
      assert self.op.iallocator is not None
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.share_locks[locking.LEVEL_NODEGROUP] = 1
      # Lock all groups used by instance optimistically; this requires going
      # via the node before it's locked, requiring verification later on
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)

    elif level == locking.LEVEL_NODE:
      if self.op.iallocator is not None:
        assert self.op.remote_node_uuid is None
        assert not self.needed_locks[locking.LEVEL_NODE]
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)

        # Lock member nodes of all locked groups
        self.needed_locks[locking.LEVEL_NODE] = \
          [node_uuid
           for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
           for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
      else:
        assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

        self._LockInstancesNodes()

    elif level == locking.LEVEL_NODE_RES:
      # Reuse node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.needed_locks[locking.LEVEL_NODE]

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    instance = self.replacer.instance
    env = {
      "MODE": self.op.mode,
      "NEW_SECONDARY": self.op.remote_node,
      "OLD_SECONDARY": self.cfg.GetNodeName(instance.secondary_nodes[0]),
      }
    env.update(BuildInstanceHookEnvByObject(self, instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    instance = self.replacer.instance
    nl = [
      self.cfg.GetMasterNode(),
      instance.primary_node,
      ]
    if self.op.remote_node_uuid is not None:
      nl.append(self.op.remote_node_uuid)
    return nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
            self.op.iallocator is None)

    # Verify if node group locks are still correct
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
    if owned_groups:
      CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)

    return LogicalUnit.CheckPrereq(self)
1720 763ad5be Thomas Thrainer
class LUInstanceActivateDisks(NoHooksLU):
1721 763ad5be Thomas Thrainer
  """Bring up an instance's disks.
1722 763ad5be Thomas Thrainer

1723 763ad5be Thomas Thrainer
  """
1724 763ad5be Thomas Thrainer
  REQ_BGL = False
1725 763ad5be Thomas Thrainer
1726 763ad5be Thomas Thrainer
  def ExpandNames(self):
1727 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1728 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1729 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1730 763ad5be Thomas Thrainer
1731 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1732 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1733 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1734 763ad5be Thomas Thrainer
1735 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1736 763ad5be Thomas Thrainer
    """Check prerequisites.
1737 763ad5be Thomas Thrainer

1738 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1739 763ad5be Thomas Thrainer

1740 763ad5be Thomas Thrainer
    """
1741 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1742 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1743 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1744 5eacbcae Thomas Thrainer
    CheckNodeOnline(self, self.instance.primary_node)
1745 763ad5be Thomas Thrainer
1746 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1747 763ad5be Thomas Thrainer
    """Activate the disks.
1748 763ad5be Thomas Thrainer

1749 763ad5be Thomas Thrainer
    """
1750 763ad5be Thomas Thrainer
    disks_ok, disks_info = \
1751 5eacbcae Thomas Thrainer
              AssembleInstanceDisks(self, self.instance,
1752 5eacbcae Thomas Thrainer
                                    ignore_size=self.op.ignore_size)
1753 763ad5be Thomas Thrainer
    if not disks_ok:
1754 763ad5be Thomas Thrainer
      raise errors.OpExecError("Cannot activate block devices")
1755 763ad5be Thomas Thrainer
1756 763ad5be Thomas Thrainer
    if self.op.wait_for_sync:
1757 5eacbcae Thomas Thrainer
      if not WaitForSync(self, self.instance):
1758 da4a52a3 Thomas Thrainer
        self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
1759 763ad5be Thomas Thrainer
        raise errors.OpExecError("Some disks of the instance are degraded!")
1760 763ad5be Thomas Thrainer
1761 763ad5be Thomas Thrainer
    return disks_info
1762 763ad5be Thomas Thrainer
1763 763ad5be Thomas Thrainer
1764 763ad5be Thomas Thrainer
class LUInstanceDeactivateDisks(NoHooksLU):
1765 763ad5be Thomas Thrainer
  """Shutdown an instance's disks.
1766 763ad5be Thomas Thrainer

1767 763ad5be Thomas Thrainer
  """
1768 763ad5be Thomas Thrainer
  REQ_BGL = False
1769 763ad5be Thomas Thrainer
1770 763ad5be Thomas Thrainer
  def ExpandNames(self):
1771 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1772 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1773 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1774 763ad5be Thomas Thrainer
1775 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1776 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1777 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1778 763ad5be Thomas Thrainer
1779 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1780 763ad5be Thomas Thrainer
    """Check prerequisites.
1781 763ad5be Thomas Thrainer

1782 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1783 763ad5be Thomas Thrainer

1784 763ad5be Thomas Thrainer
    """
1785 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1786 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1787 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1788 763ad5be Thomas Thrainer
1789 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1790 763ad5be Thomas Thrainer
    """Deactivate the disks
1791 763ad5be Thomas Thrainer

1792 763ad5be Thomas Thrainer
    """
1793 763ad5be Thomas Thrainer
    if self.op.force:
1794 d0d7d7cf Thomas Thrainer
      ShutdownInstanceDisks(self, self.instance)
1795 763ad5be Thomas Thrainer
    else:
1796 d0d7d7cf Thomas Thrainer
      _SafeShutdownInstanceDisks(self, self.instance)
1797 763ad5be Thomas Thrainer
1798 763ad5be Thomas Thrainer
1799 1c3231aa Thomas Thrainer
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
1800 763ad5be Thomas Thrainer
                               ldisk=False):
1801 763ad5be Thomas Thrainer
  """Check that mirrors are not degraded.
1802 763ad5be Thomas Thrainer

1803 763ad5be Thomas Thrainer
  @attention: The device has to be annotated already.
1804 763ad5be Thomas Thrainer

1805 763ad5be Thomas Thrainer
  The ldisk parameter, if True, will change the test from the
1806 763ad5be Thomas Thrainer
  is_degraded attribute (which represents overall non-ok status for
1807 763ad5be Thomas Thrainer
  the device(s)) to the ldisk (representing the local storage status).
1808 763ad5be Thomas Thrainer

1809 763ad5be Thomas Thrainer
  """
1810 1c3231aa Thomas Thrainer
  lu.cfg.SetDiskID(dev, node_uuid)
1811 763ad5be Thomas Thrainer
1812 763ad5be Thomas Thrainer
  result = True
1813 763ad5be Thomas Thrainer
1814 763ad5be Thomas Thrainer
  if on_primary or dev.AssembleOnSecondary():
1815 1c3231aa Thomas Thrainer
    rstats = lu.rpc.call_blockdev_find(node_uuid, dev)
1816 763ad5be Thomas Thrainer
    msg = rstats.fail_msg
1817 763ad5be Thomas Thrainer
    if msg:
1818 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s: %s",
1819 1c3231aa Thomas Thrainer
                    lu.cfg.GetNodeName(node_uuid), msg)
1820 763ad5be Thomas Thrainer
      result = False
1821 763ad5be Thomas Thrainer
    elif not rstats.payload:
1822 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
1823 763ad5be Thomas Thrainer
      result = False
1824 763ad5be Thomas Thrainer
    else:
1825 763ad5be Thomas Thrainer
      if ldisk:
1826 763ad5be Thomas Thrainer
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
1827 763ad5be Thomas Thrainer
      else:
1828 763ad5be Thomas Thrainer
        result = result and not rstats.payload.is_degraded
1829 763ad5be Thomas Thrainer
1830 763ad5be Thomas Thrainer
  if dev.children:
1831 763ad5be Thomas Thrainer
    for child in dev.children:
1832 1c3231aa Thomas Thrainer
      result = result and _CheckDiskConsistencyInner(lu, instance, child,
1833 1c3231aa Thomas Thrainer
                                                     node_uuid, on_primary)
1834 763ad5be Thomas Thrainer
1835 763ad5be Thomas Thrainer
  return result
1836 763ad5be Thomas Thrainer
1837 763ad5be Thomas Thrainer
1838 1c3231aa Thomas Thrainer
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
1839 763ad5be Thomas Thrainer
  """Wrapper around L{_CheckDiskConsistencyInner}.
1840 763ad5be Thomas Thrainer

1841 763ad5be Thomas Thrainer
  """
1842 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1843 1c3231aa Thomas Thrainer
  return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
1844 763ad5be Thomas Thrainer
                                    ldisk=ldisk)
1845 763ad5be Thomas Thrainer
1846 763ad5be Thomas Thrainer
1847 1c3231aa Thomas Thrainer
def _BlockdevFind(lu, node_uuid, dev, instance):
1848 763ad5be Thomas Thrainer
  """Wrapper around call_blockdev_find to annotate diskparams.
1849 763ad5be Thomas Thrainer

1850 763ad5be Thomas Thrainer
  @param lu: A reference to the lu object
1851 1c3231aa Thomas Thrainer
  @param node_uuid: The node to call out
1852 763ad5be Thomas Thrainer
  @param dev: The device to find
1853 763ad5be Thomas Thrainer
  @param instance: The instance object the device belongs to
1854 763ad5be Thomas Thrainer
  @return: The result of the rpc call
1855 763ad5be Thomas Thrainer

1856 763ad5be Thomas Thrainer
  """
1857 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1858 1c3231aa Thomas Thrainer
  return lu.rpc.call_blockdev_find(node_uuid, disk)
1859 763ad5be Thomas Thrainer
1860 763ad5be Thomas Thrainer
1861 763ad5be Thomas Thrainer
def _GenerateUniqueNames(lu, exts):
1862 763ad5be Thomas Thrainer
  """Generate a suitable LV name.
1863 763ad5be Thomas Thrainer

1864 763ad5be Thomas Thrainer
  This will generate a logical volume name for the given instance.
1865 763ad5be Thomas Thrainer

1866 763ad5be Thomas Thrainer
  """
1867 763ad5be Thomas Thrainer
  results = []
1868 763ad5be Thomas Thrainer
  for val in exts:
1869 763ad5be Thomas Thrainer
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
1870 763ad5be Thomas Thrainer
    results.append("%s%s" % (new_id, val))
1871 763ad5be Thomas Thrainer
  return results
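# For illustration (a sketch; these exact names are made up): the DRBD
# replace path below asks for
#   _GenerateUniqueNames(lu, [".disk0_data", ".disk0_meta"])
# and gets back something like
#   ["d2f8c384-....disk0_data", "d2f8c384-....disk0_meta"]
# where the prefix is a fresh unique ID from cfg.GenerateUniqueID().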
1872 763ad5be Thomas Thrainer
1873 763ad5be Thomas Thrainer
1874 763ad5be Thomas Thrainer
class TLReplaceDisks(Tasklet):
1875 763ad5be Thomas Thrainer
  """Replaces disks for an instance.
1876 763ad5be Thomas Thrainer

1877 763ad5be Thomas Thrainer
  Note: Locking is not within the scope of this class.
1878 763ad5be Thomas Thrainer

1879 763ad5be Thomas Thrainer
  """
1880 da4a52a3 Thomas Thrainer
  def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
1881 da4a52a3 Thomas Thrainer
               remote_node_uuid, disks, early_release, ignore_ipolicy):
1882 763ad5be Thomas Thrainer
    """Initializes this class.
1883 763ad5be Thomas Thrainer

1884 763ad5be Thomas Thrainer
    """
1885 763ad5be Thomas Thrainer
    Tasklet.__init__(self, lu)
1886 763ad5be Thomas Thrainer
1887 763ad5be Thomas Thrainer
    # Parameters
1888 da4a52a3 Thomas Thrainer
    self.instance_uuid = instance_uuid
1889 763ad5be Thomas Thrainer
    self.instance_name = instance_name
1890 763ad5be Thomas Thrainer
    self.mode = mode
1891 763ad5be Thomas Thrainer
    self.iallocator_name = iallocator_name
1892 1c3231aa Thomas Thrainer
    self.remote_node_uuid = remote_node_uuid
1893 763ad5be Thomas Thrainer
    self.disks = disks
1894 763ad5be Thomas Thrainer
    self.early_release = early_release
1895 763ad5be Thomas Thrainer
    self.ignore_ipolicy = ignore_ipolicy
1896 763ad5be Thomas Thrainer
1897 763ad5be Thomas Thrainer
    # Runtime data
1898 763ad5be Thomas Thrainer
    self.instance = None
1899 1c3231aa Thomas Thrainer
    self.new_node_uuid = None
1900 1c3231aa Thomas Thrainer
    self.target_node_uuid = None
1901 1c3231aa Thomas Thrainer
    self.other_node_uuid = None
1902 763ad5be Thomas Thrainer
    self.remote_node_info = None
1903 763ad5be Thomas Thrainer
    self.node_secondary_ip = None
1904 763ad5be Thomas Thrainer
1905 763ad5be Thomas Thrainer
  @staticmethod
1906 da4a52a3 Thomas Thrainer
  def _RunAllocator(lu, iallocator_name, instance_uuid,
1907 1c3231aa Thomas Thrainer
                    relocate_from_node_uuids):
1908 763ad5be Thomas Thrainer
    """Compute a new secondary node using an IAllocator.
1909 763ad5be Thomas Thrainer

1910 763ad5be Thomas Thrainer
    """
1911 1c3231aa Thomas Thrainer
    req = iallocator.IAReqRelocate(
1912 da4a52a3 Thomas Thrainer
          inst_uuid=instance_uuid,
1913 1c3231aa Thomas Thrainer
          relocate_from_node_uuids=list(relocate_from_node_uuids))
1914 763ad5be Thomas Thrainer
    ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
1915 763ad5be Thomas Thrainer
1916 763ad5be Thomas Thrainer
    ial.Run(iallocator_name)
1917 763ad5be Thomas Thrainer
1918 763ad5be Thomas Thrainer
    if not ial.success:
1919 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
1920 763ad5be Thomas Thrainer
                                 " %s" % (iallocator_name, ial.info),
1921 763ad5be Thomas Thrainer
                                 errors.ECODE_NORES)
1922 763ad5be Thomas Thrainer
1923 763ad5be Thomas Thrainer
    remote_node_name = ial.result[0]
1924 1c3231aa Thomas Thrainer
    remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
1925 1c3231aa Thomas Thrainer
1926 1c3231aa Thomas Thrainer
    if remote_node is None:
1927 1c3231aa Thomas Thrainer
      raise errors.OpPrereqError("Node %s not found in configuration" %
1928 1c3231aa Thomas Thrainer
                                 remote_node_name, errors.ECODE_NOENT)
1929 763ad5be Thomas Thrainer
1930 763ad5be Thomas Thrainer
    lu.LogInfo("Selected new secondary for instance '%s': %s",
1931 da4a52a3 Thomas Thrainer
               instance_uuid, remote_node_name)
1932 763ad5be Thomas Thrainer
1933 1c3231aa Thomas Thrainer
    return remote_node.uuid
1934 763ad5be Thomas Thrainer
1935 1c3231aa Thomas Thrainer
  def _FindFaultyDisks(self, node_uuid):
1936 5eacbcae Thomas Thrainer
    """Wrapper for L{FindFaultyInstanceDisks}.
1937 763ad5be Thomas Thrainer

1938 763ad5be Thomas Thrainer
    """
1939 5eacbcae Thomas Thrainer
    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
1940 1c3231aa Thomas Thrainer
                                   node_uuid, True)
1941 763ad5be Thomas Thrainer
1942 763ad5be Thomas Thrainer
  def _CheckDisksActivated(self, instance):
1943 763ad5be Thomas Thrainer
    """Checks if the instance disks are activated.
1944 763ad5be Thomas Thrainer

1945 763ad5be Thomas Thrainer
    @param instance: The instance whose disks to check
1946 763ad5be Thomas Thrainer
    @return: True if they are activated, False otherwise
1947 763ad5be Thomas Thrainer

1948 763ad5be Thomas Thrainer
    """
1949 1c3231aa Thomas Thrainer
    node_uuids = instance.all_nodes
1950 763ad5be Thomas Thrainer
1951 763ad5be Thomas Thrainer
    for idx, dev in enumerate(instance.disks):
1952 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
1953 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
1954 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
1955 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(dev, node_uuid)
1956 763ad5be Thomas Thrainer
1957 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, instance)
1958 763ad5be Thomas Thrainer
1959 763ad5be Thomas Thrainer
        if result.offline:
1960 763ad5be Thomas Thrainer
          continue
1961 763ad5be Thomas Thrainer
        elif result.fail_msg or not result.payload:
1962 763ad5be Thomas Thrainer
          return False
1963 763ad5be Thomas Thrainer
1964 763ad5be Thomas Thrainer
    return True
1965 763ad5be Thomas Thrainer
1966 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1967 763ad5be Thomas Thrainer
    """Check prerequisites.
1968 763ad5be Thomas Thrainer

1969 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1970 763ad5be Thomas Thrainer

1971 763ad5be Thomas Thrainer
    """
1972 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
1973 d0d7d7cf Thomas Thrainer
    assert self.instance is not None, \
1974 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.instance_name
1975 763ad5be Thomas Thrainer
1976 d0d7d7cf Thomas Thrainer
    if self.instance.disk_template != constants.DT_DRBD8:
1977 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
1978 763ad5be Thomas Thrainer
                                 " instances", errors.ECODE_INVAL)
1979 763ad5be Thomas Thrainer
1980 d0d7d7cf Thomas Thrainer
    if len(self.instance.secondary_nodes) != 1:
1981 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The instance has a strange layout,"
1982 763ad5be Thomas Thrainer
                                 " expected one secondary but found %d" %
1983 d0d7d7cf Thomas Thrainer
                                 len(self.instance.secondary_nodes),
1984 763ad5be Thomas Thrainer
                                 errors.ECODE_FAULT)
1985 763ad5be Thomas Thrainer
1986 d0d7d7cf Thomas Thrainer
    secondary_node_uuid = self.instance.secondary_nodes[0]
1987 763ad5be Thomas Thrainer
1988 763ad5be Thomas Thrainer
    if self.iallocator_name is None:
1989 1c3231aa Thomas Thrainer
      remote_node_uuid = self.remote_node_uuid
1990 763ad5be Thomas Thrainer
    else:
1991 1c3231aa Thomas Thrainer
      remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
1992 da4a52a3 Thomas Thrainer
                                            self.instance.uuid,
1993 d0d7d7cf Thomas Thrainer
                                            self.instance.secondary_nodes)
1994 763ad5be Thomas Thrainer
1995 1c3231aa Thomas Thrainer
    if remote_node_uuid is None:
1996 763ad5be Thomas Thrainer
      self.remote_node_info = None
1997 763ad5be Thomas Thrainer
    else:
1998 1c3231aa Thomas Thrainer
      assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
1999 1c3231aa Thomas Thrainer
             "Remote node '%s' is not locked" % remote_node_uuid
2000 763ad5be Thomas Thrainer
2001 1c3231aa Thomas Thrainer
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
2002 763ad5be Thomas Thrainer
      assert self.remote_node_info is not None, \
2003 1c3231aa Thomas Thrainer
        "Cannot retrieve locked node %s" % remote_node_uuid
2004 763ad5be Thomas Thrainer
2005 1c3231aa Thomas Thrainer
    if remote_node_uuid == self.instance.primary_node:
2006 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is the primary node of"
2007 763ad5be Thomas Thrainer
                                 " the instance", errors.ECODE_INVAL)
2008 763ad5be Thomas Thrainer
2009 1c3231aa Thomas Thrainer
    if remote_node_uuid == secondary_node_uuid:
2010 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is already the"
2011 763ad5be Thomas Thrainer
                                 " secondary node of the instance",
2012 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
2013 763ad5be Thomas Thrainer
2014 763ad5be Thomas Thrainer
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
2015 763ad5be Thomas Thrainer
                                    constants.REPLACE_DISK_CHG):
2016 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
2017 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
2018 763ad5be Thomas Thrainer
2019 763ad5be Thomas Thrainer
    if self.mode == constants.REPLACE_DISK_AUTO:
2020 d0d7d7cf Thomas Thrainer
      if not self._CheckDisksActivated(self.instance):
2021 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
2022 763ad5be Thomas Thrainer
                                   " first" % self.instance_name,
2023 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
2024 d0d7d7cf Thomas Thrainer
      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
2025 1c3231aa Thomas Thrainer
      faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
2026 763ad5be Thomas Thrainer
2027 763ad5be Thomas Thrainer
      if faulty_primary and faulty_secondary:
2028 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
2029 763ad5be Thomas Thrainer
                                   " one node and can not be repaired"
2030 763ad5be Thomas Thrainer
                                   " automatically" % self.instance_name,
2031 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
2032 763ad5be Thomas Thrainer
2033 763ad5be Thomas Thrainer
      if faulty_primary:
2034 763ad5be Thomas Thrainer
        self.disks = faulty_primary
2035 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2036 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2037 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2038 763ad5be Thomas Thrainer
      elif faulty_secondary:
2039 763ad5be Thomas Thrainer
        self.disks = faulty_secondary
2040 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2041 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2042 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2043 763ad5be Thomas Thrainer
      else:
2044 763ad5be Thomas Thrainer
        self.disks = []
2045 763ad5be Thomas Thrainer
        check_nodes = []
2046 763ad5be Thomas Thrainer
2047 763ad5be Thomas Thrainer
    else:
2048 763ad5be Thomas Thrainer
      # Non-automatic modes
2049 763ad5be Thomas Thrainer
      if self.mode == constants.REPLACE_DISK_PRI:
2050 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2051 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2052 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2053 763ad5be Thomas Thrainer
2054 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_SEC:
2055 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2056 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2057 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2058 763ad5be Thomas Thrainer
2059 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_CHG:
2060 1c3231aa Thomas Thrainer
        self.new_node_uuid = remote_node_uuid
2061 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2062 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2063 1c3231aa Thomas Thrainer
        check_nodes = [self.new_node_uuid, self.other_node_uuid]
2064 763ad5be Thomas Thrainer
2065 1c3231aa Thomas Thrainer
        CheckNodeNotDrained(self.lu, remote_node_uuid)
2066 1c3231aa Thomas Thrainer
        CheckNodeVmCapable(self.lu, remote_node_uuid)
2067 763ad5be Thomas Thrainer
2068 1c3231aa Thomas Thrainer
        old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
2069 763ad5be Thomas Thrainer
        assert old_node_info is not None
2070 763ad5be Thomas Thrainer
        if old_node_info.offline and not self.early_release:
2071 763ad5be Thomas Thrainer
          # doesn't make sense to delay the release
2072 763ad5be Thomas Thrainer
          self.early_release = True
2073 763ad5be Thomas Thrainer
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
2074 1c3231aa Thomas Thrainer
                          " early-release mode", secondary_node_uuid)
2075 763ad5be Thomas Thrainer
2076 763ad5be Thomas Thrainer
      else:
2077 763ad5be Thomas Thrainer
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
2078 763ad5be Thomas Thrainer
                                     self.mode)
2079 763ad5be Thomas Thrainer
2080 763ad5be Thomas Thrainer
      # If not specified all disks should be replaced
2081 763ad5be Thomas Thrainer
      if not self.disks:
2082 763ad5be Thomas Thrainer
        self.disks = range(len(self.instance.disks))
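    # To summarize, the roles computed above are:
    #   REPLACE_DISK_PRI:  target = primary,   other = secondary
    #   REPLACE_DISK_SEC:  target = secondary, other = primary
    #   REPLACE_DISK_CHG:  target = secondary, other = primary,
    #                      new = remote node
    #   REPLACE_DISK_AUTO: target/other depend on which side holds the
    #                      faulty disks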
2083 763ad5be Thomas Thrainer
2084 763ad5be Thomas Thrainer
    # TODO: This is ugly, but right now we can't distinguish between an
2085 763ad5be Thomas Thrainer
    # internally submitted opcode and an external one. We should fix that.
2086 763ad5be Thomas Thrainer
    if self.remote_node_info:
2087 763ad5be Thomas Thrainer
      # We change the node; let's verify it still meets the instance policy
2088 763ad5be Thomas Thrainer
      new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
2089 763ad5be Thomas Thrainer
      cluster = self.cfg.GetClusterInfo()
2090 763ad5be Thomas Thrainer
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2091 763ad5be Thomas Thrainer
                                                              new_group_info)
2092 d0d7d7cf Thomas Thrainer
      CheckTargetNodeIPolicy(self, ipolicy, self.instance,
2093 d0d7d7cf Thomas Thrainer
                             self.remote_node_info, self.cfg,
2094 d0d7d7cf Thomas Thrainer
                             ignore=self.ignore_ipolicy)
2095 763ad5be Thomas Thrainer
2096 1c3231aa Thomas Thrainer
    for node_uuid in check_nodes:
2097 1c3231aa Thomas Thrainer
      CheckNodeOnline(self.lu, node_uuid)
2098 763ad5be Thomas Thrainer
2099 1c3231aa Thomas Thrainer
    touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
2100 1c3231aa Thomas Thrainer
                                                          self.other_node_uuid,
2101 1c3231aa Thomas Thrainer
                                                          self.target_node_uuid]
2102 1c3231aa Thomas Thrainer
                              if node_uuid is not None)
2103 763ad5be Thomas Thrainer
2104 763ad5be Thomas Thrainer
    # Release unneeded node and node resource locks
2105 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
2106 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
2107 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
2108 763ad5be Thomas Thrainer
2109 763ad5be Thomas Thrainer
    # Release any owned node group
2110 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
2111 763ad5be Thomas Thrainer
2112 763ad5be Thomas Thrainer
    # Check whether disks are valid
2113 763ad5be Thomas Thrainer
    for disk_idx in self.disks:
2114 d0d7d7cf Thomas Thrainer
      self.instance.FindDisk(disk_idx)
2115 763ad5be Thomas Thrainer
2116 763ad5be Thomas Thrainer
    # Get secondary node IP addresses
2117 1c3231aa Thomas Thrainer
    self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
2118 763ad5be Thomas Thrainer
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
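    # (these feed the DRBD network configuration when the devices are
    # attached to their peers later on)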
2119 763ad5be Thomas Thrainer
2120 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
2121 763ad5be Thomas Thrainer
    """Execute disk replacement.
2122 763ad5be Thomas Thrainer

2123 763ad5be Thomas Thrainer
    This dispatches the disk replacement to the appropriate handler.
2124 763ad5be Thomas Thrainer

2125 763ad5be Thomas Thrainer
    """
2126 763ad5be Thomas Thrainer
    if __debug__:
2127 763ad5be Thomas Thrainer
      # Verify owned locks before starting operation
2128 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
2129 763ad5be Thomas Thrainer
      assert set(owned_nodes) == set(self.node_secondary_ip), \
2130 763ad5be Thomas Thrainer
          ("Incorrect node locks, owning %s, expected %s" %
2131 763ad5be Thomas Thrainer
           (owned_nodes, self.node_secondary_ip.keys()))
2132 763ad5be Thomas Thrainer
      assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
2133 763ad5be Thomas Thrainer
              self.lu.owned_locks(locking.LEVEL_NODE_RES))
2134 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
2135 763ad5be Thomas Thrainer
2136 763ad5be Thomas Thrainer
      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
2137 763ad5be Thomas Thrainer
      assert list(owned_instances) == [self.instance_name], \
2138 763ad5be Thomas Thrainer
          "Instance '%s' not locked" % self.instance_name
2139 763ad5be Thomas Thrainer
2140 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
2141 763ad5be Thomas Thrainer
          "Should not own any node group lock at this point"
2142 763ad5be Thomas Thrainer
2143 763ad5be Thomas Thrainer
    if not self.disks:
2144 763ad5be Thomas Thrainer
      feedback_fn("No disks need replacement for instance '%s'" %
2145 763ad5be Thomas Thrainer
                  self.instance.name)
2146 763ad5be Thomas Thrainer
      return
2147 763ad5be Thomas Thrainer
2148 763ad5be Thomas Thrainer
    feedback_fn("Replacing disk(s) %s for instance '%s'" %
2149 763ad5be Thomas Thrainer
                (utils.CommaJoin(self.disks), self.instance.name))
2150 1c3231aa Thomas Thrainer
    feedback_fn("Current primary node: %s" %
2151 1c3231aa Thomas Thrainer
                self.cfg.GetNodeName(self.instance.primary_node))
2152 763ad5be Thomas Thrainer
    feedback_fn("Current seconary node: %s" %
2153 1c3231aa Thomas Thrainer
                utils.CommaJoin(self.cfg.GetNodeNames(
2154 1c3231aa Thomas Thrainer
                                  self.instance.secondary_nodes)))
2155 763ad5be Thomas Thrainer
2156 1d4a4b26 Thomas Thrainer
    activate_disks = not self.instance.disks_active
2157 763ad5be Thomas Thrainer
2158 763ad5be Thomas Thrainer
    # Activate the instance disks if we're replacing them on a down instance
2159 763ad5be Thomas Thrainer
    if activate_disks:
2160 5eacbcae Thomas Thrainer
      StartInstanceDisks(self.lu, self.instance, True)
2161 763ad5be Thomas Thrainer
2162 763ad5be Thomas Thrainer
    try:
2163 763ad5be Thomas Thrainer
      # Should we replace the secondary node?
2164 1c3231aa Thomas Thrainer
      if self.new_node_uuid is not None:
2165 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8Secondary
2166 763ad5be Thomas Thrainer
      else:
2167 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8DiskOnly
2168 763ad5be Thomas Thrainer
2169 763ad5be Thomas Thrainer
      result = fn(feedback_fn)
2170 763ad5be Thomas Thrainer
    finally:
2171 763ad5be Thomas Thrainer
      # Deactivate the instance disks if we're replacing them on a
2172 763ad5be Thomas Thrainer
      # down instance
2173 763ad5be Thomas Thrainer
      if activate_disks:
2174 763ad5be Thomas Thrainer
        _SafeShutdownInstanceDisks(self.lu, self.instance)
2175 763ad5be Thomas Thrainer
2176 763ad5be Thomas Thrainer
    assert not self.lu.owned_locks(locking.LEVEL_NODE)
2177 763ad5be Thomas Thrainer
2178 763ad5be Thomas Thrainer
    if __debug__:
2179 763ad5be Thomas Thrainer
      # Verify owned locks
2180 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
2181 763ad5be Thomas Thrainer
      nodes = frozenset(self.node_secondary_ip)
2182 763ad5be Thomas Thrainer
      assert ((self.early_release and not owned_nodes) or
2183 763ad5be Thomas Thrainer
              (not self.early_release and not (set(owned_nodes) - nodes))), \
2184 763ad5be Thomas Thrainer
        ("Not owning the correct locks, early_release=%s, owned=%r,"
2185 763ad5be Thomas Thrainer
         " nodes=%r" % (self.early_release, owned_nodes, nodes))
2186 763ad5be Thomas Thrainer
2187 763ad5be Thomas Thrainer
    return result
2188 763ad5be Thomas Thrainer
2189 1c3231aa Thomas Thrainer
  def _CheckVolumeGroup(self, node_uuids):
2190 763ad5be Thomas Thrainer
    self.lu.LogInfo("Checking volume groups")
2191 763ad5be Thomas Thrainer
2192 763ad5be Thomas Thrainer
    vgname = self.cfg.GetVGName()
2193 763ad5be Thomas Thrainer
2194 763ad5be Thomas Thrainer
    # Make sure volume group exists on all involved nodes
2195 1c3231aa Thomas Thrainer
    results = self.rpc.call_vg_list(node_uuids)
2196 763ad5be Thomas Thrainer
    if not results:
2197 763ad5be Thomas Thrainer
      raise errors.OpExecError("Can't list volume groups on the nodes")
2198 763ad5be Thomas Thrainer
2199 1c3231aa Thomas Thrainer
    for node_uuid in node_uuids:
2200 1c3231aa Thomas Thrainer
      res = results[node_uuid]
2201 1c3231aa Thomas Thrainer
      res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
2202 763ad5be Thomas Thrainer
      if vgname not in res.payload:
2203 763ad5be Thomas Thrainer
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
2204 1c3231aa Thomas Thrainer
                                 (vgname, self.cfg.GetNodeName(node_uuid)))
2205 763ad5be Thomas Thrainer
2206 1c3231aa Thomas Thrainer
  def _CheckDisksExistence(self, node_uuids):
2207 763ad5be Thomas Thrainer
    # Check disk existence
2208 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2209 763ad5be Thomas Thrainer
      if idx not in self.disks:
2210 763ad5be Thomas Thrainer
        continue
2211 763ad5be Thomas Thrainer
2212 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
2213 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
2214 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
2215 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(dev, node_uuid)
2216 763ad5be Thomas Thrainer
2217 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, self.instance)
2218 763ad5be Thomas Thrainer
2219 763ad5be Thomas Thrainer
        msg = result.fail_msg
2220 763ad5be Thomas Thrainer
        if msg or not result.payload:
2221 763ad5be Thomas Thrainer
          if not msg:
2222 763ad5be Thomas Thrainer
            msg = "disk not found"
2223 763ad5be Thomas Thrainer
          raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
2224 1c3231aa Thomas Thrainer
                                   (idx, self.cfg.GetNodeName(node_uuid), msg))
2225 763ad5be Thomas Thrainer
2226 1c3231aa Thomas Thrainer
  def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
2227 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2228 763ad5be Thomas Thrainer
      if idx not in self.disks:
2229 763ad5be Thomas Thrainer
        continue
2230 763ad5be Thomas Thrainer
2231 763ad5be Thomas Thrainer
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
2232 1c3231aa Thomas Thrainer
                      (idx, self.cfg.GetNodeName(node_uuid)))
2233 763ad5be Thomas Thrainer
2234 1c3231aa Thomas Thrainer
      if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
2235 5eacbcae Thomas Thrainer
                                  on_primary, ldisk=ldisk):
2236 763ad5be Thomas Thrainer
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
2237 763ad5be Thomas Thrainer
                                 " replace disks for instance %s" %
2238 1c3231aa Thomas Thrainer
                                 (self.cfg.GetNodeName(node_uuid),
2239 1c3231aa Thomas Thrainer
                                  self.instance.name))
2240 763ad5be Thomas Thrainer
2241 1c3231aa Thomas Thrainer
  def _CreateNewStorage(self, node_uuid):
2242 763ad5be Thomas Thrainer
    """Create new storage on the primary or secondary node.
2243 763ad5be Thomas Thrainer

2244 763ad5be Thomas Thrainer
    This is only used for same-node replaces, not for changing the
2245 763ad5be Thomas Thrainer
    secondary node, hence we don't want to modify the existing disk.
2246 763ad5be Thomas Thrainer

2247 763ad5be Thomas Thrainer
    """
2248 763ad5be Thomas Thrainer
    iv_names = {}
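    # maps iv_name (e.g. "disk/0") to (drbd device, old LVs, new LVs)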
2249 763ad5be Thomas Thrainer
2250 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2251 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2252 763ad5be Thomas Thrainer
      if idx not in self.disks:
2253 763ad5be Thomas Thrainer
        continue
2254 763ad5be Thomas Thrainer
2255 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding storage on %s for disk/%d",
2256 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(node_uuid), idx)
2257 763ad5be Thomas Thrainer
2258 1c3231aa Thomas Thrainer
      self.cfg.SetDiskID(dev, node_uuid)
2259 763ad5be Thomas Thrainer
2260 763ad5be Thomas Thrainer
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
2261 763ad5be Thomas Thrainer
      names = _GenerateUniqueNames(self.lu, lv_names)
2262 763ad5be Thomas Thrainer
2263 763ad5be Thomas Thrainer
      (data_disk, meta_disk) = dev.children
2264 763ad5be Thomas Thrainer
      vg_data = data_disk.logical_id[0]
2265 763ad5be Thomas Thrainer
      lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
2266 763ad5be Thomas Thrainer
                             logical_id=(vg_data, names[0]),
2267 763ad5be Thomas Thrainer
                             params=data_disk.params)
2268 763ad5be Thomas Thrainer
      vg_meta = meta_disk.logical_id[0]
2269 763ad5be Thomas Thrainer
      lv_meta = objects.Disk(dev_type=constants.LD_LV,
2270 763ad5be Thomas Thrainer
                             size=constants.DRBD_META_SIZE,
2271 763ad5be Thomas Thrainer
                             logical_id=(vg_meta, names[1]),
2272 763ad5be Thomas Thrainer
                             params=meta_disk.params)
2273 763ad5be Thomas Thrainer
2274 763ad5be Thomas Thrainer
      new_lvs = [lv_data, lv_meta]
2275 763ad5be Thomas Thrainer
      old_lvs = [child.Copy() for child in dev.children]
2276 763ad5be Thomas Thrainer
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
2277 1c3231aa Thomas Thrainer
      excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
2278 763ad5be Thomas Thrainer
2279 763ad5be Thomas Thrainer
      # we pass force_create=True to force the LVM creation
2280 763ad5be Thomas Thrainer
      for new_lv in new_lvs:
2281 f2b58d93 Thomas Thrainer
        try:
2282 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
2283 f2b58d93 Thomas Thrainer
                               GetInstanceInfoText(self.instance), False,
2284 f2b58d93 Thomas Thrainer
                               excl_stor)
2285 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2286 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2287 763ad5be Thomas Thrainer
2288 763ad5be Thomas Thrainer
    return iv_names
2289 763ad5be Thomas Thrainer
2290 1c3231aa Thomas Thrainer
  def _CheckDevices(self, node_uuid, iv_names):
2291 763ad5be Thomas Thrainer
    for name, (dev, _, _) in iv_names.iteritems():
2292 1c3231aa Thomas Thrainer
      self.cfg.SetDiskID(dev, node_uuid)
2293 763ad5be Thomas Thrainer
2294 1c3231aa Thomas Thrainer
      result = _BlockdevFind(self, node_uuid, dev, self.instance)
2295 763ad5be Thomas Thrainer
2296 763ad5be Thomas Thrainer
      msg = result.fail_msg
2297 763ad5be Thomas Thrainer
      if msg or not result.payload:
2298 763ad5be Thomas Thrainer
        if not msg:
2299 763ad5be Thomas Thrainer
          msg = "disk not found"
2300 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
2301 763ad5be Thomas Thrainer
                                 (name, msg))
2302 763ad5be Thomas Thrainer
2303 763ad5be Thomas Thrainer
      if result.payload.is_degraded:
2304 763ad5be Thomas Thrainer
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
2305 763ad5be Thomas Thrainer
2306 1c3231aa Thomas Thrainer
  def _RemoveOldStorage(self, node_uuid, iv_names):
2307 763ad5be Thomas Thrainer
    for name, (_, old_lvs, _) in iv_names.iteritems():
2308 763ad5be Thomas Thrainer
      self.lu.LogInfo("Remove logical volumes for %s", name)
2309 763ad5be Thomas Thrainer
2310 763ad5be Thomas Thrainer
      for lv in old_lvs:
2311 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(lv, node_uuid)
2312 763ad5be Thomas Thrainer
2313 1c3231aa Thomas Thrainer
        msg = self.rpc.call_blockdev_remove(node_uuid, lv).fail_msg
2314 763ad5be Thomas Thrainer
        if msg:
2315 763ad5be Thomas Thrainer
          self.lu.LogWarning("Can't remove old LV: %s", msg,
2316 763ad5be Thomas Thrainer
                             hint="remove unused LVs manually")
2317 763ad5be Thomas Thrainer
2318 763ad5be Thomas Thrainer
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
2319 763ad5be Thomas Thrainer
    """Replace a disk on the primary or secondary for DRBD 8.
2320 763ad5be Thomas Thrainer

2321 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2322 763ad5be Thomas Thrainer

2323 763ad5be Thomas Thrainer
      1. for each disk to be replaced:
2324 763ad5be Thomas Thrainer

2325 763ad5be Thomas Thrainer
        1. create new LVs on the target node with unique names
2326 763ad5be Thomas Thrainer
        1. detach old LVs from the drbd device
2327 763ad5be Thomas Thrainer
        1. rename old LVs to name_replaced.<time_t>
2328 763ad5be Thomas Thrainer
        1. rename new LVs to old LVs
2329 763ad5be Thomas Thrainer
        1. attach the new LVs (with the old names now) to the drbd device
2330 763ad5be Thomas Thrainer

2331 763ad5be Thomas Thrainer
      1. wait for sync across all devices
2332 763ad5be Thomas Thrainer

2333 763ad5be Thomas Thrainer
      1. for each modified disk:
2334 763ad5be Thomas Thrainer

2335 763ad5be Thomas Thrainer
        1. remove old LVs (which have the name name_replaces.<time_t>)
2336 763ad5be Thomas Thrainer

2337 763ad5be Thomas Thrainer
    Failures are not very well handled.
2338 763ad5be Thomas Thrainer

2339 763ad5be Thomas Thrainer
    """
2340 763ad5be Thomas Thrainer
    steps_total = 6
2341 763ad5be Thomas Thrainer
2342 763ad5be Thomas Thrainer
    # Step: check device activation
2343 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2344 1c3231aa Thomas Thrainer
    self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
2345 1c3231aa Thomas Thrainer
    self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
2346 763ad5be Thomas Thrainer
2347 763ad5be Thomas Thrainer
    # Step: check other node consistency
2348 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2349 1c3231aa Thomas Thrainer
    self._CheckDisksConsistency(
2350 1c3231aa Thomas Thrainer
      self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
2351 1c3231aa Thomas Thrainer
      False)
2352 763ad5be Thomas Thrainer
2353 763ad5be Thomas Thrainer
    # Step: create new storage
2354 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2355 1c3231aa Thomas Thrainer
    iv_names = self._CreateNewStorage(self.target_node_uuid)
2356 763ad5be Thomas Thrainer
2357 763ad5be Thomas Thrainer
    # Step: for each lv, detach+rename*2+attach
2358 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2359 763ad5be Thomas Thrainer
    for dev, old_lvs, new_lvs in iv_names.itervalues():
2360 763ad5be Thomas Thrainer
      self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
2361 763ad5be Thomas Thrainer
2362 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_removechildren(self.target_node_uuid, dev,
2363 763ad5be Thomas Thrainer
                                                     old_lvs)
2364 763ad5be Thomas Thrainer
      result.Raise("Can't detach drbd from local storage on node"
2365 1c3231aa Thomas Thrainer
                   " %s for device %s" %
2366 1c3231aa Thomas Thrainer
                   (self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
2367 763ad5be Thomas Thrainer
      #dev.children = []
2368 763ad5be Thomas Thrainer
      #cfg.Update(instance)
2369 763ad5be Thomas Thrainer
2370 763ad5be Thomas Thrainer
      # ok, we created the new LVs, so now we know we have the needed
2371 763ad5be Thomas Thrainer
      # storage; as such, we proceed on the target node to rename
2372 763ad5be Thomas Thrainer
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
2373 763ad5be Thomas Thrainer
      # using the assumption that logical_id == physical_id (which in
2374 763ad5be Thomas Thrainer
      # turn is the unique_id on that node)
2375 763ad5be Thomas Thrainer
2376 763ad5be Thomas Thrainer
      # FIXME(iustin): use a better name for the replaced LVs
2377 763ad5be Thomas Thrainer
      temp_suffix = int(time.time())
2378 763ad5be Thomas Thrainer
      ren_fn = lambda d, suff: (d.physical_id[0],
2379 763ad5be Thomas Thrainer
                                d.physical_id[1] + "_replaced-%s" % suff)
2380 763ad5be Thomas Thrainer
2381 763ad5be Thomas Thrainer
      # Build the rename list based on what LVs exist on the node
2382 763ad5be Thomas Thrainer
      rename_old_to_new = []
2383 763ad5be Thomas Thrainer
      for to_ren in old_lvs:
2384 1c3231aa Thomas Thrainer
        result = self.rpc.call_blockdev_find(self.target_node_uuid, to_ren)
2385 763ad5be Thomas Thrainer
        if not result.fail_msg and result.payload:
2386 763ad5be Thomas Thrainer
          # device exists
2387 763ad5be Thomas Thrainer
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
2388 763ad5be Thomas Thrainer
2389 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the old LVs on the target node")
2390 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2391 763ad5be Thomas Thrainer
                                             rename_old_to_new)
2392 1c3231aa Thomas Thrainer
      result.Raise("Can't rename old LVs on node %s" %
2393 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2394 763ad5be Thomas Thrainer
2395 763ad5be Thomas Thrainer
      # Now we rename the new LVs to the old LVs
2396 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the new LVs on the target node")
2397 763ad5be Thomas Thrainer
      rename_new_to_old = [(new, old.physical_id)
2398 763ad5be Thomas Thrainer
                           for old, new in zip(old_lvs, new_lvs)]
2399 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2400 763ad5be Thomas Thrainer
                                             rename_new_to_old)
2401 1c3231aa Thomas Thrainer
      result.Raise("Can't rename new LVs on node %s" %
2402 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2403 763ad5be Thomas Thrainer
2404 763ad5be Thomas Thrainer
      # Intermediate steps of in memory modifications
2405 763ad5be Thomas Thrainer
      for old, new in zip(old_lvs, new_lvs):
2406 763ad5be Thomas Thrainer
        new.logical_id = old.logical_id
2407 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(new, self.target_node_uuid)
2408 763ad5be Thomas Thrainer
2409 763ad5be Thomas Thrainer
      # We need to modify old_lvs so that removal later removes the
2410 763ad5be Thomas Thrainer
      # right LVs, not the newly added ones; note that old_lvs is a
2411 763ad5be Thomas Thrainer
      # copy here
2412 763ad5be Thomas Thrainer
      for disk in old_lvs:
2413 763ad5be Thomas Thrainer
        disk.logical_id = ren_fn(disk, temp_suffix)
2414 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(disk, self.target_node_uuid)
2415 763ad5be Thomas Thrainer
2416 763ad5be Thomas Thrainer
      # Now that the new LVs have the old names, we can add them to the device
2417 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding new mirror component on %s",
2418 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(self.target_node_uuid))
2419 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
2420 763ad5be Thomas Thrainer
                                                  (dev, self.instance), new_lvs)
2421 763ad5be Thomas Thrainer
      msg = result.fail_msg
2422 763ad5be Thomas Thrainer
      if msg:
2423 763ad5be Thomas Thrainer
        for new_lv in new_lvs:
2424 1c3231aa Thomas Thrainer
          msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
2425 763ad5be Thomas Thrainer
                                               new_lv).fail_msg
2426 763ad5be Thomas Thrainer
          if msg2:
2427 763ad5be Thomas Thrainer
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
2428 763ad5be Thomas Thrainer
                               hint=("cleanup manually the unused logical"
2429 763ad5be Thomas Thrainer
                                     "volumes"))
2430 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
2431 763ad5be Thomas Thrainer
2432 763ad5be Thomas Thrainer
    cstep = itertools.count(5)
2433 763ad5be Thomas Thrainer
2434 763ad5be Thomas Thrainer
    if self.early_release:
2435 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2436 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2437 763ad5be Thomas Thrainer
      # TODO: Check if releasing locks early still makes sense
2438 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
2439 763ad5be Thomas Thrainer
    else:
2440 763ad5be Thomas Thrainer
      # Release all resource locks except those used by the instance
2441 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
2442 5eacbcae Thomas Thrainer
                   keep=self.node_secondary_ip.keys())
2443 763ad5be Thomas Thrainer
2444 763ad5be Thomas Thrainer
    # Release all node locks while waiting for sync
2445 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE)
2446 763ad5be Thomas Thrainer
2447 763ad5be Thomas Thrainer
    # TODO: Can the instance lock be downgraded here? Take the optional disk
2448 763ad5be Thomas Thrainer
    # shutdown in the caller into consideration.
2449 763ad5be Thomas Thrainer
2450 763ad5be Thomas Thrainer
    # Wait for sync
2451 763ad5be Thomas Thrainer
    # This can fail as the old devices are degraded and WaitForSync
2452 763ad5be Thomas Thrainer
    # combines the results over all disks, so we don't check its return value
2453 763ad5be Thomas Thrainer
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
2454 5eacbcae Thomas Thrainer
    WaitForSync(self.lu, self.instance)
2455 763ad5be Thomas Thrainer
2456 763ad5be Thomas Thrainer
    # Check all devices manually
2457 763ad5be Thomas Thrainer
    self._CheckDevices(self.instance.primary_node, iv_names)
2458 763ad5be Thomas Thrainer
2459 763ad5be Thomas Thrainer
    # Step: remove old storage
2460 763ad5be Thomas Thrainer
    if not self.early_release:
2461 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2462 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2463 763ad5be Thomas Thrainer
2464 763ad5be Thomas Thrainer
  def _ExecDrbd8Secondary(self, feedback_fn):
2465 763ad5be Thomas Thrainer
    """Replace the secondary node for DRBD 8.
2466 763ad5be Thomas Thrainer

2467 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2468 763ad5be Thomas Thrainer
      - for all disks of the instance:
2469 763ad5be Thomas Thrainer
        - create new LVs on the new node with same names
2470 763ad5be Thomas Thrainer
        - shutdown the drbd device on the old secondary
2471 763ad5be Thomas Thrainer
        - disconnect the drbd network on the primary
2472 763ad5be Thomas Thrainer
        - create the drbd device on the new secondary
2473 763ad5be Thomas Thrainer
        - network attach the drbd on the primary, using an artifice:
2474 763ad5be Thomas Thrainer
          the drbd code for Attach() will connect to the network if it
2475 763ad5be Thomas Thrainer
          finds a device which is connected to the correct local disks but
2476 763ad5be Thomas Thrainer
          not network enabled
2477 763ad5be Thomas Thrainer
      - wait for sync across all devices
2478 763ad5be Thomas Thrainer
      - remove all disks from the old secondary
2479 763ad5be Thomas Thrainer

2480 763ad5be Thomas Thrainer
    Failures are not very well handled.
2481 763ad5be Thomas Thrainer

2482 763ad5be Thomas Thrainer
    """
2483 763ad5be Thomas Thrainer
    steps_total = 6
2484 763ad5be Thomas Thrainer
2485 763ad5be Thomas Thrainer
    pnode = self.instance.primary_node
2486 763ad5be Thomas Thrainer
2487 763ad5be Thomas Thrainer
    # Step: check device activation
2488 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2489 763ad5be Thomas Thrainer
    self._CheckDisksExistence([self.instance.primary_node])
2490 763ad5be Thomas Thrainer
    self._CheckVolumeGroup([self.instance.primary_node])
2491 763ad5be Thomas Thrainer
2492 763ad5be Thomas Thrainer
    # Step: check other node consistency
2493 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2494 763ad5be Thomas Thrainer
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
2495 763ad5be Thomas Thrainer
2496 763ad5be Thomas Thrainer
    # Step: create new storage
2497 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2498 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2499 1c3231aa Thomas Thrainer
    excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
2500 1c3231aa Thomas Thrainer
                                                  self.new_node_uuid)
2501 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2502 763ad5be Thomas Thrainer
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
2503 1c3231aa Thomas Thrainer
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
2504 763ad5be Thomas Thrainer
      # we pass force_create=True to force LVM creation
2505 763ad5be Thomas Thrainer
      for new_lv in dev.children:
2506 f2b58d93 Thomas Thrainer
        try:
2507 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
2508 dad226e3 Thomas Thrainer
                               new_lv, True, GetInstanceInfoText(self.instance),
2509 dad226e3 Thomas Thrainer
                               False, excl_stor)
2510 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2511 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2512 763ad5be Thomas Thrainer
2513 763ad5be Thomas Thrainer
    # Step 4: drbd minors and drbd setup changes
2514 763ad5be Thomas Thrainer
    # after this, we must manually remove the drbd minors on both the
2515 763ad5be Thomas Thrainer
    # error and the success paths
2516 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2517 1c3231aa Thomas Thrainer
    minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
2518 1c3231aa Thomas Thrainer
                                         for _ in self.instance.disks],
2519 da4a52a3 Thomas Thrainer
                                        self.instance.uuid)
2520 763ad5be Thomas Thrainer
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
                    p_minor, new_minor, o_secret)
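      # both tuples follow the DRBD8 logical_id layout unpacked above:
      # (node_A, node_B, port, minor_A, minor_B, secret); new_alone_id leaves
      # the port None so the device comes up standalone, while new_net_id
      # keeps the port for the reconnect further down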

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size,
                              params={})
      (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
                                            self.cfg)
      try:
        CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
                             anno_new_drbd,
                             GetInstanceInfoText(self.instance), False,
                             excl_stor)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise

    # We have new devices; shut down the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
      self.cfg.SetDiskID(dev, self.target_node_uuid)
      msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                            (dev, self.instance)).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please clean up this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
                                               self.instance.disks)[pnode]
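    # the RPC returns one result per node in the list; we only asked for the
    # primary, so index the result dict by pnode directly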

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)
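    # from this point on the cluster configuration names the new secondary;
    # a failure below leaves the disks degraded but the config consistent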

    # Release all node locks (the configuration has been updated)
    ReleaseLocks(self.lu, locking.LEVEL_NODE)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node_uuid],
                                           self.node_secondary_ip,
                                           (self.instance.disks, self.instance),
                                           self.instance.name,
                                           False)
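    # the trailing False is (assuming the backend's DrbdAttachNet signature)
    # the multimaster flag; plain secondary replacement never needs
    # dual-primary mode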
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           self.cfg.GetNodeName(to_node), msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    cstep = itertools.count(5)
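    # cstep hands out the remaining step numbers (5, then 6) so that the two
    # orderings below -- remove-then-sync with early_release, sync-then-remove
    # without -- share the same LogStep numbering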

    if self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
      # TODO: Check if releasing locks early still makes sense
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
    else:
      # Release all resource locks except those used by the instance
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
                   keep=self.node_secondary_ip.keys())
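    # node_secondary_ip presumably maps node UUID -> secondary IP for the
    # nodes still involved, so its keys are exactly the locks worth keeping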

    # TODO: Can the instance lock be downgraded here? Take the optional disk
    # shutdown in the caller into consideration.

    # Wait for sync
    # This can fail as the old devices are degraded and WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
    WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)