
root / lib / cmdlib / instance_storage.py @ fb62843c


1 763ad5be Thomas Thrainer
#
2 763ad5be Thomas Thrainer
#
3 763ad5be Thomas Thrainer
4 763ad5be Thomas Thrainer
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 763ad5be Thomas Thrainer
#
6 763ad5be Thomas Thrainer
# This program is free software; you can redistribute it and/or modify
7 763ad5be Thomas Thrainer
# it under the terms of the GNU General Public License as published by
8 763ad5be Thomas Thrainer
# the Free Software Foundation; either version 2 of the License, or
9 763ad5be Thomas Thrainer
# (at your option) any later version.
10 763ad5be Thomas Thrainer
#
11 763ad5be Thomas Thrainer
# This program is distributed in the hope that it will be useful, but
12 763ad5be Thomas Thrainer
# WITHOUT ANY WARRANTY; without even the implied warranty of
13 763ad5be Thomas Thrainer
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14 763ad5be Thomas Thrainer
# General Public License for more details.
15 763ad5be Thomas Thrainer
#
16 763ad5be Thomas Thrainer
# You should have received a copy of the GNU General Public License
17 763ad5be Thomas Thrainer
# along with this program; if not, write to the Free Software
18 763ad5be Thomas Thrainer
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 763ad5be Thomas Thrainer
# 02110-1301, USA.
20 763ad5be Thomas Thrainer
21 763ad5be Thomas Thrainer
22 763ad5be Thomas Thrainer
"""Logical units dealing with storage of instances."""
23 763ad5be Thomas Thrainer
24 763ad5be Thomas Thrainer
import itertools
25 763ad5be Thomas Thrainer
import logging
26 763ad5be Thomas Thrainer
import os
27 763ad5be Thomas Thrainer
import time
28 763ad5be Thomas Thrainer
29 763ad5be Thomas Thrainer
from ganeti import compat
30 763ad5be Thomas Thrainer
from ganeti import constants
31 763ad5be Thomas Thrainer
from ganeti import errors
32 763ad5be Thomas Thrainer
from ganeti import ht
33 763ad5be Thomas Thrainer
from ganeti import locking
34 763ad5be Thomas Thrainer
from ganeti.masterd import iallocator
35 763ad5be Thomas Thrainer
from ganeti import objects
36 763ad5be Thomas Thrainer
from ganeti import utils
37 763ad5be Thomas Thrainer
from ganeti import rpc
38 763ad5be Thomas Thrainer
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
39 763ad5be Thomas Thrainer
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
40 1c3231aa Thomas Thrainer
  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
41 5eacbcae Thomas Thrainer
  CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
42 1f7c8208 Helga Velroyen
  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
43 1f7c8208 Helga Velroyen
  CheckDiskTemplateEnabled
44 5eacbcae Thomas Thrainer
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
45 5eacbcae Thomas Thrainer
  CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
46 5eacbcae Thomas Thrainer
  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
47 763ad5be Thomas Thrainer
48 763ad5be Thomas Thrainer
import ganeti.masterd.instance
49 763ad5be Thomas Thrainer
50 763ad5be Thomas Thrainer
51 763ad5be Thomas Thrainer
_DISK_TEMPLATE_NAME_PREFIX = {
52 763ad5be Thomas Thrainer
  constants.DT_PLAIN: "",
53 763ad5be Thomas Thrainer
  constants.DT_RBD: ".rbd",
54 763ad5be Thomas Thrainer
  constants.DT_EXT: ".ext",
55 763ad5be Thomas Thrainer
  }
56 763ad5be Thomas Thrainer
57 763ad5be Thomas Thrainer
58 1c3231aa Thomas Thrainer
def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
59 5eacbcae Thomas Thrainer
                         excl_stor):
60 763ad5be Thomas Thrainer
  """Create a single block device on a given node.
61 763ad5be Thomas Thrainer

62 763ad5be Thomas Thrainer
  This will not recurse over children of the device, so they must be
63 763ad5be Thomas Thrainer
  created in advance.
64 763ad5be Thomas Thrainer

65 763ad5be Thomas Thrainer
  @param lu: the lu on whose behalf we execute
66 1c3231aa Thomas Thrainer
  @param node_uuid: the node on which to create the device
67 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
68 763ad5be Thomas Thrainer
  @param instance: the instance which owns the device
69 763ad5be Thomas Thrainer
  @type device: L{objects.Disk}
70 763ad5be Thomas Thrainer
  @param device: the device to create
71 763ad5be Thomas Thrainer
  @param info: the extra 'metadata' we should attach to the device
72 763ad5be Thomas Thrainer
      (this will be represented as an LVM tag)
73 763ad5be Thomas Thrainer
  @type force_open: boolean
74 763ad5be Thomas Thrainer
  @param force_open: this parameter will be passed to the
75 763ad5be Thomas Thrainer
      L{backend.BlockdevCreate} function where it specifies
76 763ad5be Thomas Thrainer
      whether we run on primary or not, and it affects both
77 763ad5be Thomas Thrainer
      the child assembly and the device's own Open() execution
78 763ad5be Thomas Thrainer
  @type excl_stor: boolean
79 763ad5be Thomas Thrainer
  @param excl_stor: Whether exclusive_storage is active for the node
80 763ad5be Thomas Thrainer

81 763ad5be Thomas Thrainer
  """
82 1c3231aa Thomas Thrainer
  lu.cfg.SetDiskID(device, node_uuid)
83 1c3231aa Thomas Thrainer
  result = lu.rpc.call_blockdev_create(node_uuid, device, device.size,
84 763ad5be Thomas Thrainer
                                       instance.name, force_open, info,
85 763ad5be Thomas Thrainer
                                       excl_stor)
86 763ad5be Thomas Thrainer
  result.Raise("Can't create block device %s on"
87 1c3231aa Thomas Thrainer
               " node %s for instance %s" % (device,
88 1c3231aa Thomas Thrainer
                                             lu.cfg.GetNodeName(node_uuid),
89 1c3231aa Thomas Thrainer
                                             instance.name))
90 763ad5be Thomas Thrainer
  if device.physical_id is None:
91 763ad5be Thomas Thrainer
    device.physical_id = result.payload
92 763ad5be Thomas Thrainer
93 763ad5be Thomas Thrainer
94 1c3231aa Thomas Thrainer
def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
95 763ad5be Thomas Thrainer
                         info, force_open, excl_stor):
96 763ad5be Thomas Thrainer
  """Create a tree of block devices on a given node.
97 763ad5be Thomas Thrainer

98 763ad5be Thomas Thrainer
  If this device type has to be created on secondaries, create it and
99 763ad5be Thomas Thrainer
  all its children.
100 763ad5be Thomas Thrainer

101 763ad5be Thomas Thrainer
  If not, just recurse to children keeping the same 'force' value.
102 763ad5be Thomas Thrainer

103 763ad5be Thomas Thrainer
  @attention: The device has to be annotated already.
104 763ad5be Thomas Thrainer

105 763ad5be Thomas Thrainer
  @param lu: the lu on whose behalf we execute
106 1c3231aa Thomas Thrainer
  @param node_uuid: the node on which to create the device
107 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
108 763ad5be Thomas Thrainer
  @param instance: the instance which owns the device
109 763ad5be Thomas Thrainer
  @type device: L{objects.Disk}
110 763ad5be Thomas Thrainer
  @param device: the device to create
111 763ad5be Thomas Thrainer
  @type force_create: boolean
112 763ad5be Thomas Thrainer
  @param force_create: whether to force creation of this device; this
113 763ad5be Thomas Thrainer
      will be change to True whenever we find a device which has
114 763ad5be Thomas Thrainer
      CreateOnSecondary() attribute
115 763ad5be Thomas Thrainer
  @param info: the extra 'metadata' we should attach to the device
116 763ad5be Thomas Thrainer
      (this will be represented as an LVM tag)
117 763ad5be Thomas Thrainer
  @type force_open: boolean
118 763ad5be Thomas Thrainer
  @param force_open: this parameter will be passed to the
119 763ad5be Thomas Thrainer
      L{backend.BlockdevCreate} function where it specifies
120 763ad5be Thomas Thrainer
      whether we run on primary or not, and it affects both
121 763ad5be Thomas Thrainer
      the child assembly and the device's own Open() execution
122 763ad5be Thomas Thrainer
  @type excl_stor: boolean
123 763ad5be Thomas Thrainer
  @param excl_stor: Whether exclusive_storage is active for the node
124 763ad5be Thomas Thrainer

125 763ad5be Thomas Thrainer
  @return: list of created devices
126 763ad5be Thomas Thrainer
  """
127 763ad5be Thomas Thrainer
  created_devices = []
128 763ad5be Thomas Thrainer
  try:
129 763ad5be Thomas Thrainer
    if device.CreateOnSecondary():
130 763ad5be Thomas Thrainer
      force_create = True
131 763ad5be Thomas Thrainer
132 763ad5be Thomas Thrainer
    if device.children:
133 763ad5be Thomas Thrainer
      for child in device.children:
134 1c3231aa Thomas Thrainer
        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
135 1c3231aa Thomas Thrainer
                                    force_create, info, force_open, excl_stor)
136 763ad5be Thomas Thrainer
        created_devices.extend(devs)
137 763ad5be Thomas Thrainer
138 763ad5be Thomas Thrainer
    if not force_create:
139 763ad5be Thomas Thrainer
      return created_devices
140 763ad5be Thomas Thrainer
141 1c3231aa Thomas Thrainer
    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
142 5eacbcae Thomas Thrainer
                         excl_stor)
143 763ad5be Thomas Thrainer
    # The device has been completely created, so there is no point in keeping
144 763ad5be Thomas Thrainer
    # its subdevices in the list. We just add the device itself instead.
145 1c3231aa Thomas Thrainer
    created_devices = [(node_uuid, device)]
146 763ad5be Thomas Thrainer
    return created_devices
147 763ad5be Thomas Thrainer
148 763ad5be Thomas Thrainer
  except errors.DeviceCreationError, e:
149 763ad5be Thomas Thrainer
    e.created_devices.extend(created_devices)
150 763ad5be Thomas Thrainer
    raise e
151 763ad5be Thomas Thrainer
  except errors.OpExecError, e:
152 763ad5be Thomas Thrainer
    raise errors.DeviceCreationError(str(e), created_devices)
153 763ad5be Thomas Thrainer
154 763ad5be Thomas Thrainer
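
# Illustrative sketch (not part of this module): the recursion pattern used by
# _CreateBlockDevInner, reduced to a toy device tree.  _ToyDevice and
# _create_tree are hypothetical stand-ins; the real code goes through lu.rpc
# and the cluster configuration instead of touching devices directly.
class _ToyDevice(object):
  def __init__(self, name, create_on_secondary=False, children=None):
    self.name = name
    self.create_on_secondary = create_on_secondary
    self.children = children or []

def _create_tree(device, force_create):
  created = []
  if device.create_on_secondary:
    force_create = True
  for child in device.children:
    created.extend(_create_tree(child, force_create))
  if not force_create:
    return created
  # Once the device itself is created, its children need not be tracked any
  # longer; only the top-level device is recorded, as in the code above.
  return [device.name]

# A DRBD-like device that must exist on secondaries, with two LV children:
assert _create_tree(_ToyDevice("drbd0", create_on_secondary=True,
                               children=[_ToyDevice("lv_data"),
                                         _ToyDevice("lv_meta")]),
                    False) == ["drbd0"]
# A plain LV with force_create left False is not created at all:
assert _create_tree(_ToyDevice("lv0"), False) == []
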
155 1c3231aa Thomas Thrainer
def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
156 763ad5be Thomas Thrainer
  """Whether exclusive_storage is in effect for the given node.
157 763ad5be Thomas Thrainer

158 763ad5be Thomas Thrainer
  @type cfg: L{config.ConfigWriter}
159 763ad5be Thomas Thrainer
  @param cfg: The cluster configuration
160 1c3231aa Thomas Thrainer
  @type node_uuid: string
161 1c3231aa Thomas Thrainer
  @param node_uuid: The node UUID
162 763ad5be Thomas Thrainer
  @rtype: bool
163 763ad5be Thomas Thrainer
  @return: The effective value of exclusive_storage
164 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if no node exists with the given UUID
165 763ad5be Thomas Thrainer

166 763ad5be Thomas Thrainer
  """
167 1c3231aa Thomas Thrainer
  ni = cfg.GetNodeInfo(node_uuid)
168 763ad5be Thomas Thrainer
  if ni is None:
169 1c3231aa Thomas Thrainer
    raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
170 763ad5be Thomas Thrainer
                               errors.ECODE_NOENT)
171 5eacbcae Thomas Thrainer
  return IsExclusiveStorageEnabledNode(cfg, ni)
172 763ad5be Thomas Thrainer
173 763ad5be Thomas Thrainer
174 1c3231aa Thomas Thrainer
def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
175 763ad5be Thomas Thrainer
                    force_open):
176 763ad5be Thomas Thrainer
  """Wrapper around L{_CreateBlockDevInner}.
177 763ad5be Thomas Thrainer

178 763ad5be Thomas Thrainer
  This method annotates the root device first.
179 763ad5be Thomas Thrainer

180 763ad5be Thomas Thrainer
  """
181 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
182 1c3231aa Thomas Thrainer
  excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
183 1c3231aa Thomas Thrainer
  return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
184 763ad5be Thomas Thrainer
                              force_open, excl_stor)
185 763ad5be Thomas Thrainer
186 763ad5be Thomas Thrainer
187 a365b47f Bernardo Dal Seno
def _UndoCreateDisks(lu, disks_created):
188 a365b47f Bernardo Dal Seno
  """Undo the work performed by L{CreateDisks}.
189 a365b47f Bernardo Dal Seno

190 a365b47f Bernardo Dal Seno
  This function is called in case of an error to undo the work of
191 a365b47f Bernardo Dal Seno
  L{CreateDisks}.
192 a365b47f Bernardo Dal Seno

193 a365b47f Bernardo Dal Seno
  @type lu: L{LogicalUnit}
194 a365b47f Bernardo Dal Seno
  @param lu: the logical unit on whose behalf we execute
195 a365b47f Bernardo Dal Seno
  @param disks_created: the result returned by L{CreateDisks}
196 a365b47f Bernardo Dal Seno

197 a365b47f Bernardo Dal Seno
  """
198 1c3231aa Thomas Thrainer
  for (node_uuid, disk) in disks_created:
199 1c3231aa Thomas Thrainer
    lu.cfg.SetDiskID(disk, node_uuid)
200 1c3231aa Thomas Thrainer
    result = lu.rpc.call_blockdev_remove(node_uuid, disk)
201 c7dd65be Klaus Aehlig
    result.Warn("Failed to remove newly-created disk %s on node %s" %
202 1c3231aa Thomas Thrainer
                (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)
203 a365b47f Bernardo Dal Seno
204 a365b47f Bernardo Dal Seno
205 1c3231aa Thomas Thrainer
def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
206 763ad5be Thomas Thrainer
  """Create all disks for an instance.
207 763ad5be Thomas Thrainer

208 763ad5be Thomas Thrainer
  This abstracts away some work from AddInstance.
209 763ad5be Thomas Thrainer

210 763ad5be Thomas Thrainer
  @type lu: L{LogicalUnit}
211 763ad5be Thomas Thrainer
  @param lu: the logical unit on whose behalf we execute
212 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
213 763ad5be Thomas Thrainer
  @param instance: the instance whose disks we should create
214 763ad5be Thomas Thrainer
  @type to_skip: list
215 763ad5be Thomas Thrainer
  @param to_skip: list of indices to skip
216 1c3231aa Thomas Thrainer
  @type target_node_uuid: string
217 1c3231aa Thomas Thrainer
  @param target_node_uuid: if passed, overrides the target node for creation
218 a365b47f Bernardo Dal Seno
  @type disks: list of L{objects.Disk}
219 a365b47f Bernardo Dal Seno
  @param disks: the disks to create; if not specified, all the disks of the
220 a365b47f Bernardo Dal Seno
      instance are created
221 a365b47f Bernardo Dal Seno
  @return: information about the created disks, to be used to call
222 a365b47f Bernardo Dal Seno
      L{_UndoCreateDisks}
223 a365b47f Bernardo Dal Seno
  @raise errors.OpPrereqError: in case of error
224 763ad5be Thomas Thrainer

225 763ad5be Thomas Thrainer
  """
226 5eacbcae Thomas Thrainer
  info = GetInstanceInfoText(instance)
227 1c3231aa Thomas Thrainer
  if target_node_uuid is None:
228 1c3231aa Thomas Thrainer
    pnode_uuid = instance.primary_node
229 1c3231aa Thomas Thrainer
    all_node_uuids = instance.all_nodes
230 763ad5be Thomas Thrainer
  else:
231 1c3231aa Thomas Thrainer
    pnode_uuid = target_node_uuid
232 1c3231aa Thomas Thrainer
    all_node_uuids = [pnode_uuid]
233 763ad5be Thomas Thrainer
234 a365b47f Bernardo Dal Seno
  if disks is None:
235 a365b47f Bernardo Dal Seno
    disks = instance.disks
236 a365b47f Bernardo Dal Seno
237 1f7c8208 Helga Velroyen
  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
238 1f7c8208 Helga Velroyen
239 763ad5be Thomas Thrainer
  if instance.disk_template in constants.DTS_FILEBASED:
240 763ad5be Thomas Thrainer
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
241 1c3231aa Thomas Thrainer
    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
242 763ad5be Thomas Thrainer
243 763ad5be Thomas Thrainer
    result.Raise("Failed to create directory '%s' on"
244 1c3231aa Thomas Thrainer
                 " node %s" % (file_storage_dir,
245 1c3231aa Thomas Thrainer
                               lu.cfg.GetNodeName(pnode_uuid)))
246 763ad5be Thomas Thrainer
247 763ad5be Thomas Thrainer
  disks_created = []
248 a365b47f Bernardo Dal Seno
  for idx, device in enumerate(disks):
249 763ad5be Thomas Thrainer
    if to_skip and idx in to_skip:
250 763ad5be Thomas Thrainer
      continue
251 763ad5be Thomas Thrainer
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
252 1c3231aa Thomas Thrainer
    for node_uuid in all_node_uuids:
253 1c3231aa Thomas Thrainer
      f_create = node_uuid == pnode_uuid
254 763ad5be Thomas Thrainer
      try:
255 1c3231aa Thomas Thrainer
        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
256 1c3231aa Thomas Thrainer
                        f_create)
257 1c3231aa Thomas Thrainer
        disks_created.append((node_uuid, device))
258 763ad5be Thomas Thrainer
      except errors.DeviceCreationError, e:
259 763ad5be Thomas Thrainer
        logging.warning("Creating disk %s for instance '%s' failed",
260 763ad5be Thomas Thrainer
                        idx, instance.name)
261 763ad5be Thomas Thrainer
        disks_created.extend(e.created_devices)
262 a365b47f Bernardo Dal Seno
        _UndoCreateDisks(lu, disks_created)
263 763ad5be Thomas Thrainer
        raise errors.OpExecError(e.message)
264 a365b47f Bernardo Dal Seno
  return disks_created
265 763ad5be Thomas Thrainer
266 763ad5be Thomas Thrainer
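
# Illustrative sketch (not part of this module): the create-or-roll-back
# pattern that CreateDisks and _UndoCreateDisks implement, reduced to toy
# callables.  create_one and remove_one are hypothetical stand-ins for the
# blockdev_create/blockdev_remove RPC calls.
def _create_all_or_undo(disks, create_one, remove_one):
  created = []
  try:
    for disk in disks:
      create_one(disk)
      created.append(disk)
  except Exception:
    # Something failed part-way: remove whatever was already created, then
    # re-raise so the caller still sees the original error.
    for disk in reversed(created):
      remove_one(disk)
    raise
  return created
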
267 5eacbcae Thomas Thrainer
def ComputeDiskSizePerVG(disk_template, disks):
268 763ad5be Thomas Thrainer
  """Compute disk size requirements in the volume group
269 763ad5be Thomas Thrainer

270 763ad5be Thomas Thrainer
  """
271 763ad5be Thomas Thrainer
  def _compute(disks, payload):
272 763ad5be Thomas Thrainer
    """Universal algorithm.
273 763ad5be Thomas Thrainer

274 763ad5be Thomas Thrainer
    """
275 763ad5be Thomas Thrainer
    vgs = {}
276 763ad5be Thomas Thrainer
    for disk in disks:
277 763ad5be Thomas Thrainer
      vgs[disk[constants.IDISK_VG]] = \
278 763ad5be Thomas Thrainer
        vgs.get(constants.IDISK_VG, 0) + disk[constants.IDISK_SIZE] + payload
279 763ad5be Thomas Thrainer
280 763ad5be Thomas Thrainer
    return vgs
281 763ad5be Thomas Thrainer
282 763ad5be Thomas Thrainer
  # Required free disk space as a function of disk and swap space
283 763ad5be Thomas Thrainer
  req_size_dict = {
284 763ad5be Thomas Thrainer
    constants.DT_DISKLESS: {},
285 763ad5be Thomas Thrainer
    constants.DT_PLAIN: _compute(disks, 0),
286 763ad5be Thomas Thrainer
    # 128 MB are added for drbd metadata for each disk
287 763ad5be Thomas Thrainer
    constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
288 763ad5be Thomas Thrainer
    constants.DT_FILE: {},
289 763ad5be Thomas Thrainer
    constants.DT_SHARED_FILE: {},
290 763ad5be Thomas Thrainer
    }
291 763ad5be Thomas Thrainer
292 763ad5be Thomas Thrainer
  if disk_template not in req_size_dict:
293 763ad5be Thomas Thrainer
    raise errors.ProgrammerError("Disk template '%s' size requirement"
294 763ad5be Thomas Thrainer
                                 " is unknown" % disk_template)
295 763ad5be Thomas Thrainer
296 763ad5be Thomas Thrainer
  return req_size_dict[disk_template]
297 763ad5be Thomas Thrainer
298 763ad5be Thomas Thrainer
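
# Worked example (not part of this module): two disks of 1024 and 2048 MiB in
# volume group "xenvg" require {"xenvg": 3072} for DT_PLAIN and
# {"xenvg": 3328} for DT_DRBD8, since each DRBD8 disk also reserves
# constants.DRBD_META_SIZE (128 MiB) of metadata.  The standalone
# re-computation below uses plain "vg"/"size" keys as stand-ins for
# constants.IDISK_VG/constants.IDISK_SIZE.
def _per_vg(disks, payload):
  vgs = {}
  for disk in disks:
    vgs[disk["vg"]] = vgs.get(disk["vg"], 0) + disk["size"] + payload
  return vgs

_example = [{"vg": "xenvg", "size": 1024}, {"vg": "xenvg", "size": 2048}]
assert _per_vg(_example, 0) == {"xenvg": 3072}     # DT_PLAIN
assert _per_vg(_example, 128) == {"xenvg": 3328}   # DT_DRBD8 metadata
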
299 5eacbcae Thomas Thrainer
def ComputeDisks(op, default_vg):
300 763ad5be Thomas Thrainer
  """Computes the instance disks.
301 763ad5be Thomas Thrainer

302 763ad5be Thomas Thrainer
  @param op: The instance opcode
303 763ad5be Thomas Thrainer
  @param default_vg: The default_vg to assume
304 763ad5be Thomas Thrainer

305 763ad5be Thomas Thrainer
  @return: The computed disks
306 763ad5be Thomas Thrainer

307 763ad5be Thomas Thrainer
  """
308 763ad5be Thomas Thrainer
  disks = []
309 763ad5be Thomas Thrainer
  for disk in op.disks:
310 763ad5be Thomas Thrainer
    mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
311 763ad5be Thomas Thrainer
    if mode not in constants.DISK_ACCESS_SET:
312 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Invalid disk access mode '%s'" %
313 763ad5be Thomas Thrainer
                                 mode, errors.ECODE_INVAL)
314 763ad5be Thomas Thrainer
    size = disk.get(constants.IDISK_SIZE, None)
315 763ad5be Thomas Thrainer
    if size is None:
316 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
317 763ad5be Thomas Thrainer
    try:
318 763ad5be Thomas Thrainer
      size = int(size)
319 763ad5be Thomas Thrainer
    except (TypeError, ValueError):
320 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Invalid disk size '%s'" % size,
321 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
322 763ad5be Thomas Thrainer
323 763ad5be Thomas Thrainer
    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
324 763ad5be Thomas Thrainer
    if ext_provider and op.disk_template != constants.DT_EXT:
325 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
326 763ad5be Thomas Thrainer
                                 " disk template, not %s" %
327 763ad5be Thomas Thrainer
                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
328 763ad5be Thomas Thrainer
                                  op.disk_template), errors.ECODE_INVAL)
329 763ad5be Thomas Thrainer
330 763ad5be Thomas Thrainer
    data_vg = disk.get(constants.IDISK_VG, default_vg)
331 763ad5be Thomas Thrainer
    name = disk.get(constants.IDISK_NAME, None)
332 763ad5be Thomas Thrainer
    if name is not None and name.lower() == constants.VALUE_NONE:
333 763ad5be Thomas Thrainer
      name = None
334 763ad5be Thomas Thrainer
    new_disk = {
335 763ad5be Thomas Thrainer
      constants.IDISK_SIZE: size,
336 763ad5be Thomas Thrainer
      constants.IDISK_MODE: mode,
337 763ad5be Thomas Thrainer
      constants.IDISK_VG: data_vg,
338 763ad5be Thomas Thrainer
      constants.IDISK_NAME: name,
339 763ad5be Thomas Thrainer
      }
340 763ad5be Thomas Thrainer
341 3f3ea14c Bernardo Dal Seno
    for key in [
342 3f3ea14c Bernardo Dal Seno
      constants.IDISK_METAVG,
343 3f3ea14c Bernardo Dal Seno
      constants.IDISK_ADOPT,
344 3f3ea14c Bernardo Dal Seno
      constants.IDISK_SPINDLES,
345 3f3ea14c Bernardo Dal Seno
      ]:
346 3f3ea14c Bernardo Dal Seno
      if key in disk:
347 3f3ea14c Bernardo Dal Seno
        new_disk[key] = disk[key]
348 763ad5be Thomas Thrainer
349 763ad5be Thomas Thrainer
    # For extstorage, demand the `provider' option and add any
350 763ad5be Thomas Thrainer
    # additional parameters (ext-params) to the dict
351 763ad5be Thomas Thrainer
    if op.disk_template == constants.DT_EXT:
352 763ad5be Thomas Thrainer
      if ext_provider:
353 763ad5be Thomas Thrainer
        new_disk[constants.IDISK_PROVIDER] = ext_provider
354 763ad5be Thomas Thrainer
        for key in disk:
355 763ad5be Thomas Thrainer
          if key not in constants.IDISK_PARAMS:
356 763ad5be Thomas Thrainer
            new_disk[key] = disk[key]
357 763ad5be Thomas Thrainer
      else:
358 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Missing provider for template '%s'" %
359 763ad5be Thomas Thrainer
                                   constants.DT_EXT, errors.ECODE_INVAL)
360 763ad5be Thomas Thrainer
361 763ad5be Thomas Thrainer
    disks.append(new_disk)
362 763ad5be Thomas Thrainer
363 763ad5be Thomas Thrainer
  return disks
364 763ad5be Thomas Thrainer
365 763ad5be Thomas Thrainer
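
# Illustrative example (not part of this module): the shape ComputeDisks
# produces, with plain string keys standing in for the constants.IDISK_* keys
# used above, and "xenvg" assumed as the default volume group.  A request for
# one 10 GiB read-write disk:
_requested = {"size": 10240}                      # one entry of op.disks
_computed = {"size": 10240, "mode": "rw",         # DISK_RDWR is the default
             "vg": "xenvg", "name": None}
# Missing or non-integer sizes, unknown access modes, and a provider given
# for a non-DT_EXT template are all rejected with errors.OpPrereqError.
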
366 5eacbcae Thomas Thrainer
def CheckRADOSFreeSpace():
367 763ad5be Thomas Thrainer
  """Compute disk size requirements inside the RADOS cluster.
368 763ad5be Thomas Thrainer

369 763ad5be Thomas Thrainer
  """
370 763ad5be Thomas Thrainer
  # For the RADOS cluster we assume there is always enough space.
371 763ad5be Thomas Thrainer
  pass
372 763ad5be Thomas Thrainer
373 763ad5be Thomas Thrainer
374 1c3231aa Thomas Thrainer
def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
375 763ad5be Thomas Thrainer
                         iv_name, p_minor, s_minor):
376 763ad5be Thomas Thrainer
  """Generate a drbd8 device complete with its children.
377 763ad5be Thomas Thrainer

378 763ad5be Thomas Thrainer
  """
379 763ad5be Thomas Thrainer
  assert len(vgnames) == len(names) == 2
380 763ad5be Thomas Thrainer
  port = lu.cfg.AllocatePort()
381 763ad5be Thomas Thrainer
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
382 763ad5be Thomas Thrainer
383 cd3b4ff4 Helga Velroyen
  dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
384 763ad5be Thomas Thrainer
                          logical_id=(vgnames[0], names[0]),
385 763ad5be Thomas Thrainer
                          params={})
386 763ad5be Thomas Thrainer
  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
387 cd3b4ff4 Helga Velroyen
  dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
388 763ad5be Thomas Thrainer
                          size=constants.DRBD_META_SIZE,
389 763ad5be Thomas Thrainer
                          logical_id=(vgnames[1], names[1]),
390 763ad5be Thomas Thrainer
                          params={})
391 763ad5be Thomas Thrainer
  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
392 cd3b4ff4 Helga Velroyen
  drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
393 1c3231aa Thomas Thrainer
                          logical_id=(primary_uuid, secondary_uuid, port,
394 763ad5be Thomas Thrainer
                                      p_minor, s_minor,
395 763ad5be Thomas Thrainer
                                      shared_secret),
396 763ad5be Thomas Thrainer
                          children=[dev_data, dev_meta],
397 763ad5be Thomas Thrainer
                          iv_name=iv_name, params={})
398 763ad5be Thomas Thrainer
  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
399 763ad5be Thomas Thrainer
  return drbd_dev
400 763ad5be Thomas Thrainer
401 763ad5be Thomas Thrainer
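
# Resulting layout (illustration, not part of this module): a DT_DRBD8 disk of
# size S built by _GenerateDRBD8Branch is a three-node tree,
#
#   DT_DRBD8 (size=S, logical_id=(primary_uuid, secondary_uuid, port,
#                                 p_minor, s_minor, shared_secret))
#     +- DT_PLAIN data LV (size=S,
#                          logical_id=(vgnames[0], names[0]))
#     +- DT_PLAIN meta LV (size=constants.DRBD_META_SIZE,
#                          logical_id=(vgnames[1], names[1]))
#
# so each DRBD8 disk consumes S + DRBD_META_SIZE MiB of LVM space, matching
# the DT_DRBD8 entry in ComputeDiskSizePerVG above.
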
402 5eacbcae Thomas Thrainer
def GenerateDiskTemplate(
403 da4a52a3 Thomas Thrainer
  lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
404 763ad5be Thomas Thrainer
  disk_info, file_storage_dir, file_driver, base_index,
405 850c53f1 Helga Velroyen
  feedback_fn, full_disk_params):
406 763ad5be Thomas Thrainer
  """Generate the entire disk layout for a given template type.
407 763ad5be Thomas Thrainer

408 763ad5be Thomas Thrainer
  """
409 763ad5be Thomas Thrainer
  vgname = lu.cfg.GetVGName()
410 763ad5be Thomas Thrainer
  disk_count = len(disk_info)
411 763ad5be Thomas Thrainer
  disks = []
412 763ad5be Thomas Thrainer
413 850c53f1 Helga Velroyen
  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)
414 850c53f1 Helga Velroyen
415 763ad5be Thomas Thrainer
  if template_name == constants.DT_DISKLESS:
416 763ad5be Thomas Thrainer
    pass
417 763ad5be Thomas Thrainer
  elif template_name == constants.DT_DRBD8:
418 1c3231aa Thomas Thrainer
    if len(secondary_node_uuids) != 1:
419 763ad5be Thomas Thrainer
      raise errors.ProgrammerError("Wrong template configuration")
420 1c3231aa Thomas Thrainer
    remote_node_uuid = secondary_node_uuids[0]
421 763ad5be Thomas Thrainer
    minors = lu.cfg.AllocateDRBDMinor(
422 da4a52a3 Thomas Thrainer
      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)
423 763ad5be Thomas Thrainer
424 763ad5be Thomas Thrainer
    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
425 763ad5be Thomas Thrainer
                                                       full_disk_params)
426 763ad5be Thomas Thrainer
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]
427 763ad5be Thomas Thrainer
428 763ad5be Thomas Thrainer
    names = []
429 763ad5be Thomas Thrainer
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
430 763ad5be Thomas Thrainer
                                               for i in range(disk_count)]):
431 763ad5be Thomas Thrainer
      names.append(lv_prefix + "_data")
432 763ad5be Thomas Thrainer
      names.append(lv_prefix + "_meta")
433 763ad5be Thomas Thrainer
    for idx, disk in enumerate(disk_info):
434 763ad5be Thomas Thrainer
      disk_index = idx + base_index
435 763ad5be Thomas Thrainer
      data_vg = disk.get(constants.IDISK_VG, vgname)
436 763ad5be Thomas Thrainer
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
437 1c3231aa Thomas Thrainer
      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
438 763ad5be Thomas Thrainer
                                      disk[constants.IDISK_SIZE],
439 763ad5be Thomas Thrainer
                                      [data_vg, meta_vg],
440 763ad5be Thomas Thrainer
                                      names[idx * 2:idx * 2 + 2],
441 763ad5be Thomas Thrainer
                                      "disk/%d" % disk_index,
442 763ad5be Thomas Thrainer
                                      minors[idx * 2], minors[idx * 2 + 1])
443 763ad5be Thomas Thrainer
      disk_dev.mode = disk[constants.IDISK_MODE]
444 763ad5be Thomas Thrainer
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
445 763ad5be Thomas Thrainer
      disks.append(disk_dev)
446 763ad5be Thomas Thrainer
  else:
447 1c3231aa Thomas Thrainer
    if secondary_node_uuids:
448 763ad5be Thomas Thrainer
      raise errors.ProgrammerError("Wrong template configuration")
449 763ad5be Thomas Thrainer
450 763ad5be Thomas Thrainer
    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
451 763ad5be Thomas Thrainer
    if name_prefix is None:
452 763ad5be Thomas Thrainer
      names = None
453 763ad5be Thomas Thrainer
    else:
454 763ad5be Thomas Thrainer
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
455 763ad5be Thomas Thrainer
                                        (name_prefix, base_index + i)
456 763ad5be Thomas Thrainer
                                        for i in range(disk_count)])
457 763ad5be Thomas Thrainer
458 763ad5be Thomas Thrainer
    if template_name == constants.DT_PLAIN:
459 763ad5be Thomas Thrainer
460 763ad5be Thomas Thrainer
      def logical_id_fn(idx, _, disk):
461 763ad5be Thomas Thrainer
        vg = disk.get(constants.IDISK_VG, vgname)
462 763ad5be Thomas Thrainer
        return (vg, names[idx])
463 763ad5be Thomas Thrainer
464 763ad5be Thomas Thrainer
    elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
465 763ad5be Thomas Thrainer
      logical_id_fn = \
466 763ad5be Thomas Thrainer
        lambda _, disk_index, disk: (file_driver,
467 763ad5be Thomas Thrainer
                                     "%s/disk%d" % (file_storage_dir,
468 763ad5be Thomas Thrainer
                                                    disk_index))
469 763ad5be Thomas Thrainer
    elif template_name == constants.DT_BLOCK:
470 763ad5be Thomas Thrainer
      logical_id_fn = \
471 763ad5be Thomas Thrainer
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
472 763ad5be Thomas Thrainer
                                       disk[constants.IDISK_ADOPT])
473 763ad5be Thomas Thrainer
    elif template_name == constants.DT_RBD:
474 763ad5be Thomas Thrainer
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
475 763ad5be Thomas Thrainer
    elif template_name == constants.DT_EXT:
476 763ad5be Thomas Thrainer
      def logical_id_fn(idx, _, disk):
477 763ad5be Thomas Thrainer
        provider = disk.get(constants.IDISK_PROVIDER, None)
478 763ad5be Thomas Thrainer
        if provider is None:
479 763ad5be Thomas Thrainer
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
480 763ad5be Thomas Thrainer
                                       " not found", constants.DT_EXT,
481 763ad5be Thomas Thrainer
                                       constants.IDISK_PROVIDER)
482 763ad5be Thomas Thrainer
        return (provider, names[idx])
483 763ad5be Thomas Thrainer
    else:
484 763ad5be Thomas Thrainer
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
485 763ad5be Thomas Thrainer
486 cd3b4ff4 Helga Velroyen
    dev_type = template_name
487 763ad5be Thomas Thrainer
488 763ad5be Thomas Thrainer
    for idx, disk in enumerate(disk_info):
489 763ad5be Thomas Thrainer
      params = {}
490 763ad5be Thomas Thrainer
      # Only for the Ext template, add disk_info to params
491 763ad5be Thomas Thrainer
      if template_name == constants.DT_EXT:
492 763ad5be Thomas Thrainer
        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
493 763ad5be Thomas Thrainer
        for key in disk:
494 763ad5be Thomas Thrainer
          if key not in constants.IDISK_PARAMS:
495 763ad5be Thomas Thrainer
            params[key] = disk[key]
496 763ad5be Thomas Thrainer
      disk_index = idx + base_index
497 763ad5be Thomas Thrainer
      size = disk[constants.IDISK_SIZE]
498 763ad5be Thomas Thrainer
      feedback_fn("* disk %s, size %s" %
499 763ad5be Thomas Thrainer
                  (disk_index, utils.FormatUnit(size, "h")))
500 763ad5be Thomas Thrainer
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
501 763ad5be Thomas Thrainer
                              logical_id=logical_id_fn(idx, disk_index, disk),
502 763ad5be Thomas Thrainer
                              iv_name="disk/%d" % disk_index,
503 763ad5be Thomas Thrainer
                              mode=disk[constants.IDISK_MODE],
504 b54ecf12 Bernardo Dal Seno
                              params=params,
505 b54ecf12 Bernardo Dal Seno
                              spindles=disk.get(constants.IDISK_SPINDLES))
506 763ad5be Thomas Thrainer
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
507 763ad5be Thomas Thrainer
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
508 763ad5be Thomas Thrainer
      disks.append(disk_dev)
509 763ad5be Thomas Thrainer
510 763ad5be Thomas Thrainer
  return disks
511 763ad5be Thomas Thrainer
512 763ad5be Thomas Thrainer
513 7c848a6a Bernardo Dal Seno
def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
514 3f3ea14c Bernardo Dal Seno
  """Check the presence of the spindle options with exclusive_storage.
515 3f3ea14c Bernardo Dal Seno

516 3f3ea14c Bernardo Dal Seno
  @type diskdict: dict
517 3f3ea14c Bernardo Dal Seno
  @param diskdict: disk parameters
518 3f3ea14c Bernardo Dal Seno
  @type es_flag: bool
519 3f3ea14c Bernardo Dal Seno
  @param es_flag: the effective value of the exclusive_storage flag
520 7c848a6a Bernardo Dal Seno
  @type required: bool
521 7c848a6a Bernardo Dal Seno
  @param required: whether spindles are required or just optional
522 3f3ea14c Bernardo Dal Seno
  @raise errors.OpPrereqError: when spindles are given and they should not be
523 3f3ea14c Bernardo Dal Seno

524 3f3ea14c Bernardo Dal Seno
  """
525 3f3ea14c Bernardo Dal Seno
  if (not es_flag and constants.IDISK_SPINDLES in diskdict and
526 3f3ea14c Bernardo Dal Seno
      diskdict[constants.IDISK_SPINDLES] is not None):
527 3f3ea14c Bernardo Dal Seno
    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
528 3f3ea14c Bernardo Dal Seno
                               " when exclusive storage is not active",
529 3f3ea14c Bernardo Dal Seno
                               errors.ECODE_INVAL)
530 7c848a6a Bernardo Dal Seno
  if (es_flag and required and (constants.IDISK_SPINDLES not in diskdict or
531 7c848a6a Bernardo Dal Seno
                                diskdict[constants.IDISK_SPINDLES] is None)):
532 7c848a6a Bernardo Dal Seno
    raise errors.OpPrereqError("You must specify spindles in instance disks"
533 7c848a6a Bernardo Dal Seno
                               " when exclusive storage is active",
534 7c848a6a Bernardo Dal Seno
                               errors.ECODE_INVAL)
535 3f3ea14c Bernardo Dal Seno
536 3f3ea14c Bernardo Dal Seno
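
# Illustrative sketch (not part of this module): the decision table enforced
# by CheckSpindlesExclusiveStorage, with the plain "spindles" key standing in
# for constants.IDISK_SPINDLES.
def _spindles_ok(diskdict, es_flag, required):
  has_spindles = diskdict.get("spindles") is not None
  if not es_flag and has_spindles:
    return False   # spindles given although exclusive storage is off
  if es_flag and required and not has_spindles:
    return False   # exclusive storage on, spindles required but missing
  return True

assert _spindles_ok({}, False, False)
assert not _spindles_ok({"spindles": 2}, False, False)
assert _spindles_ok({}, True, False)            # optional: absence is fine
assert not _spindles_ok({}, True, True)
assert _spindles_ok({"spindles": 2}, True, True)
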
537 763ad5be Thomas Thrainer
class LUInstanceRecreateDisks(LogicalUnit):
538 763ad5be Thomas Thrainer
  """Recreate an instance's missing disks.
539 763ad5be Thomas Thrainer

540 763ad5be Thomas Thrainer
  """
541 763ad5be Thomas Thrainer
  HPATH = "instance-recreate-disks"
542 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
543 763ad5be Thomas Thrainer
  REQ_BGL = False
544 763ad5be Thomas Thrainer
545 763ad5be Thomas Thrainer
  _MODIFYABLE = compat.UniqueFrozenset([
546 763ad5be Thomas Thrainer
    constants.IDISK_SIZE,
547 763ad5be Thomas Thrainer
    constants.IDISK_MODE,
548 c615590c Bernardo Dal Seno
    constants.IDISK_SPINDLES,
549 763ad5be Thomas Thrainer
    ])
550 763ad5be Thomas Thrainer
551 763ad5be Thomas Thrainer
  # New or changed disk parameters may have different semantics
552 763ad5be Thomas Thrainer
  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
553 763ad5be Thomas Thrainer
    constants.IDISK_ADOPT,
554 763ad5be Thomas Thrainer
555 763ad5be Thomas Thrainer
    # TODO: Implement support changing VG while recreating
556 763ad5be Thomas Thrainer
    constants.IDISK_VG,
557 763ad5be Thomas Thrainer
    constants.IDISK_METAVG,
558 763ad5be Thomas Thrainer
    constants.IDISK_PROVIDER,
559 763ad5be Thomas Thrainer
    constants.IDISK_NAME,
560 763ad5be Thomas Thrainer
    ]))
561 763ad5be Thomas Thrainer
562 763ad5be Thomas Thrainer
  def _RunAllocator(self):
563 763ad5be Thomas Thrainer
    """Run the allocator based on input opcode.
564 763ad5be Thomas Thrainer

565 763ad5be Thomas Thrainer
    """
566 763ad5be Thomas Thrainer
    be_full = self.cfg.GetClusterInfo().FillBE(self.instance)
567 763ad5be Thomas Thrainer
568 763ad5be Thomas Thrainer
    # FIXME
569 763ad5be Thomas Thrainer
    # The allocator should actually run in "relocate" mode, but current
570 763ad5be Thomas Thrainer
    # allocators don't support relocating all the nodes of an instance at
571 763ad5be Thomas Thrainer
    # the same time. As a workaround we use "allocate" mode, but this is
572 763ad5be Thomas Thrainer
    # suboptimal for two reasons:
573 763ad5be Thomas Thrainer
    # - The instance name passed to the allocator is present in the list of
574 763ad5be Thomas Thrainer
    #   existing instances, so there could be a conflict within the
575 763ad5be Thomas Thrainer
    #   internal structures of the allocator. This doesn't happen with the
576 763ad5be Thomas Thrainer
    #   current allocators, but it's a liability.
577 763ad5be Thomas Thrainer
    # - The allocator counts the resources used by the instance twice: once
578 763ad5be Thomas Thrainer
    #   because the instance exists already, and once because it tries to
579 763ad5be Thomas Thrainer
    #   allocate a new instance.
580 763ad5be Thomas Thrainer
    # The allocator could choose some of the nodes on which the instance is
581 763ad5be Thomas Thrainer
    # running, but that's not a problem. If the instance nodes are broken,
582 763ad5be Thomas Thrainer
    # they should already be marked as drained or offline, and hence
583 763ad5be Thomas Thrainer
    # skipped by the allocator. If instance disks have been lost for other
584 763ad5be Thomas Thrainer
    # reasons, then recreating the disks on the same nodes should be fine.
585 763ad5be Thomas Thrainer
    disk_template = self.instance.disk_template
586 763ad5be Thomas Thrainer
    spindle_use = be_full[constants.BE_SPINDLE_USE]
587 0e514de1 Bernardo Dal Seno
    disks = [{
588 0e514de1 Bernardo Dal Seno
      constants.IDISK_SIZE: d.size,
589 0e514de1 Bernardo Dal Seno
      constants.IDISK_MODE: d.mode,
590 0e514de1 Bernardo Dal Seno
      constants.IDISK_SPINDLES: d.spindles,
591 0e514de1 Bernardo Dal Seno
      } for d in self.instance.disks]
592 763ad5be Thomas Thrainer
    req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
593 763ad5be Thomas Thrainer
                                        disk_template=disk_template,
594 763ad5be Thomas Thrainer
                                        tags=list(self.instance.GetTags()),
595 763ad5be Thomas Thrainer
                                        os=self.instance.os,
596 763ad5be Thomas Thrainer
                                        nics=[{}],
597 763ad5be Thomas Thrainer
                                        vcpus=be_full[constants.BE_VCPUS],
598 763ad5be Thomas Thrainer
                                        memory=be_full[constants.BE_MAXMEM],
599 763ad5be Thomas Thrainer
                                        spindle_use=spindle_use,
600 0e514de1 Bernardo Dal Seno
                                        disks=disks,
601 763ad5be Thomas Thrainer
                                        hypervisor=self.instance.hypervisor,
602 763ad5be Thomas Thrainer
                                        node_whitelist=None)
603 763ad5be Thomas Thrainer
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)
604 763ad5be Thomas Thrainer
605 763ad5be Thomas Thrainer
    ial.Run(self.op.iallocator)
606 763ad5be Thomas Thrainer
607 763ad5be Thomas Thrainer
    assert req.RequiredNodes() == len(self.instance.all_nodes)
608 763ad5be Thomas Thrainer
609 763ad5be Thomas Thrainer
    if not ial.success:
610 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
611 763ad5be Thomas Thrainer
                                 " %s" % (self.op.iallocator, ial.info),
612 763ad5be Thomas Thrainer
                                 errors.ECODE_NORES)
613 763ad5be Thomas Thrainer
614 1c3231aa Thomas Thrainer
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
615 763ad5be Thomas Thrainer
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
616 763ad5be Thomas Thrainer
                 self.op.instance_name, self.op.iallocator,
617 1c3231aa Thomas Thrainer
                 utils.CommaJoin(self.op.nodes))
618 763ad5be Thomas Thrainer
619 763ad5be Thomas Thrainer
  def CheckArguments(self):
620 763ad5be Thomas Thrainer
    if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
621 763ad5be Thomas Thrainer
      # Normalize and convert deprecated list of disk indices
622 763ad5be Thomas Thrainer
      self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]
623 763ad5be Thomas Thrainer
624 763ad5be Thomas Thrainer
    duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
625 763ad5be Thomas Thrainer
    if duplicates:
626 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Some disks have been specified more than"
627 763ad5be Thomas Thrainer
                                 " once: %s" % utils.CommaJoin(duplicates),
628 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
629 763ad5be Thomas Thrainer
630 763ad5be Thomas Thrainer
    # We don't want _CheckIAllocatorOrNode selecting the default iallocator
631 763ad5be Thomas Thrainer
    # when neither iallocator nor nodes are specified
632 763ad5be Thomas Thrainer
    if self.op.iallocator or self.op.nodes:
633 5eacbcae Thomas Thrainer
      CheckIAllocatorOrNode(self, "iallocator", "nodes")
634 763ad5be Thomas Thrainer
635 763ad5be Thomas Thrainer
    for (idx, params) in self.op.disks:
636 763ad5be Thomas Thrainer
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
637 763ad5be Thomas Thrainer
      unsupported = frozenset(params.keys()) - self._MODIFYABLE
638 763ad5be Thomas Thrainer
      if unsupported:
639 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Parameters for disk %s try to change"
640 763ad5be Thomas Thrainer
                                   " unmodifyable parameter(s): %s" %
641 763ad5be Thomas Thrainer
                                   (idx, utils.CommaJoin(unsupported)),
642 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
643 763ad5be Thomas Thrainer
644 763ad5be Thomas Thrainer
  def ExpandNames(self):
645 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
646 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
647 763ad5be Thomas Thrainer
648 763ad5be Thomas Thrainer
    if self.op.nodes:
649 1c3231aa Thomas Thrainer
      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
650 1c3231aa Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
651 763ad5be Thomas Thrainer
    else:
652 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = []
653 763ad5be Thomas Thrainer
      if self.op.iallocator:
654 763ad5be Thomas Thrainer
        # iallocator will select a new node in the same group
655 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
656 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
657 763ad5be Thomas Thrainer
658 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
659 763ad5be Thomas Thrainer
660 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
661 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODEGROUP:
662 763ad5be Thomas Thrainer
      assert self.op.iallocator is not None
663 763ad5be Thomas Thrainer
      assert not self.op.nodes
664 763ad5be Thomas Thrainer
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
665 763ad5be Thomas Thrainer
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
666 763ad5be Thomas Thrainer
      # Lock the primary group used by the instance optimistically; this
667 763ad5be Thomas Thrainer
      # requires going via the node before it's locked, requiring
668 763ad5be Thomas Thrainer
      # verification later on
669 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
670 da4a52a3 Thomas Thrainer
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)
671 763ad5be Thomas Thrainer
672 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE:
673 763ad5be Thomas Thrainer
      # If an allocator is used, then we lock all the nodes in the current
674 763ad5be Thomas Thrainer
      # instance group, as we don't know yet which ones will be selected;
675 763ad5be Thomas Thrainer
      # if we replace the nodes without using an allocator, locks are
676 763ad5be Thomas Thrainer
      # already declared in ExpandNames; otherwise, we need to lock all the
677 763ad5be Thomas Thrainer
      # instance nodes for disk re-creation
678 763ad5be Thomas Thrainer
      if self.op.iallocator:
679 763ad5be Thomas Thrainer
        assert not self.op.nodes
680 763ad5be Thomas Thrainer
        assert not self.needed_locks[locking.LEVEL_NODE]
681 763ad5be Thomas Thrainer
        assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1
682 763ad5be Thomas Thrainer
683 763ad5be Thomas Thrainer
        # Lock member nodes of the group of the primary node
684 763ad5be Thomas Thrainer
        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
685 763ad5be Thomas Thrainer
          self.needed_locks[locking.LEVEL_NODE].extend(
686 763ad5be Thomas Thrainer
            self.cfg.GetNodeGroup(group_uuid).members)
687 763ad5be Thomas Thrainer
688 763ad5be Thomas Thrainer
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
689 763ad5be Thomas Thrainer
      elif not self.op.nodes:
690 763ad5be Thomas Thrainer
        self._LockInstancesNodes(primary_only=False)
691 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
692 763ad5be Thomas Thrainer
      # Copy node locks
693 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
694 5eacbcae Thomas Thrainer
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
695 763ad5be Thomas Thrainer
696 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
697 763ad5be Thomas Thrainer
    """Build hooks env.
698 763ad5be Thomas Thrainer

699 763ad5be Thomas Thrainer
    This runs on master, primary and secondary nodes of the instance.
700 763ad5be Thomas Thrainer

701 763ad5be Thomas Thrainer
    """
702 5eacbcae Thomas Thrainer
    return BuildInstanceHookEnvByObject(self, self.instance)
703 763ad5be Thomas Thrainer
704 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
705 763ad5be Thomas Thrainer
    """Build hooks nodes.
706 763ad5be Thomas Thrainer

707 763ad5be Thomas Thrainer
    """
708 763ad5be Thomas Thrainer
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
709 763ad5be Thomas Thrainer
    return (nl, nl)
710 763ad5be Thomas Thrainer
711 763ad5be Thomas Thrainer
  def CheckPrereq(self):
712 763ad5be Thomas Thrainer
    """Check prerequisites.
713 763ad5be Thomas Thrainer

714 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster and is not running.
715 763ad5be Thomas Thrainer

716 763ad5be Thomas Thrainer
    """
717 da4a52a3 Thomas Thrainer
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
718 763ad5be Thomas Thrainer
    assert instance is not None, \
719 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
720 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
721 1c3231aa Thomas Thrainer
      if len(self.op.node_uuids) != len(instance.all_nodes):
722 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
723 763ad5be Thomas Thrainer
                                   " %d replacement nodes were specified" %
724 763ad5be Thomas Thrainer
                                   (instance.name, len(instance.all_nodes),
725 1c3231aa Thomas Thrainer
                                    len(self.op.node_uuids)),
726 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
727 763ad5be Thomas Thrainer
      assert instance.disk_template != constants.DT_DRBD8 or \
728 1c3231aa Thomas Thrainer
             len(self.op.node_uuids) == 2
729 763ad5be Thomas Thrainer
      assert instance.disk_template != constants.DT_PLAIN or \
730 1c3231aa Thomas Thrainer
             len(self.op.node_uuids) == 1
731 1c3231aa Thomas Thrainer
      primary_node = self.op.node_uuids[0]
732 763ad5be Thomas Thrainer
    else:
733 763ad5be Thomas Thrainer
      primary_node = instance.primary_node
734 763ad5be Thomas Thrainer
    if not self.op.iallocator:
735 5eacbcae Thomas Thrainer
      CheckNodeOnline(self, primary_node)
736 763ad5be Thomas Thrainer
737 763ad5be Thomas Thrainer
    if instance.disk_template == constants.DT_DISKLESS:
738 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Instance '%s' has no disks" %
739 763ad5be Thomas Thrainer
                                 self.op.instance_name, errors.ECODE_INVAL)
740 763ad5be Thomas Thrainer
741 763ad5be Thomas Thrainer
    # Verify if node group locks are still correct
742 763ad5be Thomas Thrainer
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
743 763ad5be Thomas Thrainer
    if owned_groups:
744 763ad5be Thomas Thrainer
      # Node group locks are acquired only for the primary node (and only
745 763ad5be Thomas Thrainer
      # when the allocator is used)
746 da4a52a3 Thomas Thrainer
      CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
747 5eacbcae Thomas Thrainer
                              primary_only=True)
748 763ad5be Thomas Thrainer
749 763ad5be Thomas Thrainer
    # if we replace nodes *and* the old primary is offline, we don't
750 763ad5be Thomas Thrainer
    # check the instance state
751 763ad5be Thomas Thrainer
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
752 1c3231aa Thomas Thrainer
    if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
753 5eacbcae Thomas Thrainer
      CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
754 5eacbcae Thomas Thrainer
                         msg="cannot recreate disks")
755 763ad5be Thomas Thrainer
756 763ad5be Thomas Thrainer
    if self.op.disks:
757 763ad5be Thomas Thrainer
      self.disks = dict(self.op.disks)
758 763ad5be Thomas Thrainer
    else:
759 763ad5be Thomas Thrainer
      self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
760 763ad5be Thomas Thrainer
761 763ad5be Thomas Thrainer
    maxidx = max(self.disks.keys())
762 763ad5be Thomas Thrainer
    if maxidx >= len(instance.disks):
763 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
764 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
765 763ad5be Thomas Thrainer
766 1c3231aa Thomas Thrainer
    if ((self.op.node_uuids or self.op.iallocator) and
767 763ad5be Thomas Thrainer
         sorted(self.disks.keys()) != range(len(instance.disks))):
768 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't recreate disks partially and"
769 763ad5be Thomas Thrainer
                                 " change the nodes at the same time",
770 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
771 763ad5be Thomas Thrainer
772 763ad5be Thomas Thrainer
    self.instance = instance
773 763ad5be Thomas Thrainer
774 763ad5be Thomas Thrainer
    if self.op.iallocator:
775 763ad5be Thomas Thrainer
      self._RunAllocator()
776 763ad5be Thomas Thrainer
      # Release unneeded node and node resource locks
777 1c3231aa Thomas Thrainer
      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
778 1c3231aa Thomas Thrainer
      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
779 5eacbcae Thomas Thrainer
      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
780 763ad5be Thomas Thrainer
781 763ad5be Thomas Thrainer
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
782 763ad5be Thomas Thrainer
783 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
784 1c3231aa Thomas Thrainer
      node_uuids = self.op.node_uuids
785 3f3ea14c Bernardo Dal Seno
    else:
786 1c3231aa Thomas Thrainer
      node_uuids = instance.all_nodes
787 3f3ea14c Bernardo Dal Seno
    excl_stor = compat.any(
788 1c3231aa Thomas Thrainer
      rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
789 3f3ea14c Bernardo Dal Seno
      )
790 3f3ea14c Bernardo Dal Seno
    for new_params in self.disks.values():
791 7c848a6a Bernardo Dal Seno
      CheckSpindlesExclusiveStorage(new_params, excl_stor, False)
792 3f3ea14c Bernardo Dal Seno
793 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
794 763ad5be Thomas Thrainer
    """Recreate the disks.
795 763ad5be Thomas Thrainer

796 763ad5be Thomas Thrainer
    """
797 763ad5be Thomas Thrainer
    assert (self.owned_locks(locking.LEVEL_NODE) ==
798 763ad5be Thomas Thrainer
            self.owned_locks(locking.LEVEL_NODE_RES))
799 763ad5be Thomas Thrainer
800 763ad5be Thomas Thrainer
    to_skip = []
801 763ad5be Thomas Thrainer
    mods = [] # keeps track of needed changes
802 763ad5be Thomas Thrainer
803 d0d7d7cf Thomas Thrainer
    for idx, disk in enumerate(self.instance.disks):
804 763ad5be Thomas Thrainer
      try:
805 763ad5be Thomas Thrainer
        changes = self.disks[idx]
806 763ad5be Thomas Thrainer
      except KeyError:
807 763ad5be Thomas Thrainer
        # Disk should not be recreated
808 763ad5be Thomas Thrainer
        to_skip.append(idx)
809 763ad5be Thomas Thrainer
        continue
810 763ad5be Thomas Thrainer
811 763ad5be Thomas Thrainer
      # update secondaries for disks, if needed
812 cd3b4ff4 Helga Velroyen
      if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
813 763ad5be Thomas Thrainer
        # need to update the nodes and minors
814 1c3231aa Thomas Thrainer
        assert len(self.op.node_uuids) == 2
815 763ad5be Thomas Thrainer
        assert len(disk.logical_id) == 6 # otherwise disk internals
816 763ad5be Thomas Thrainer
                                         # have changed
817 763ad5be Thomas Thrainer
        (_, _, old_port, _, _, old_secret) = disk.logical_id
818 1c3231aa Thomas Thrainer
        new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
819 da4a52a3 Thomas Thrainer
                                                self.instance.uuid)
820 1c3231aa Thomas Thrainer
        new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
821 763ad5be Thomas Thrainer
                  new_minors[0], new_minors[1], old_secret)
822 763ad5be Thomas Thrainer
        assert len(disk.logical_id) == len(new_id)
823 763ad5be Thomas Thrainer
      else:
824 763ad5be Thomas Thrainer
        new_id = None
825 763ad5be Thomas Thrainer
826 763ad5be Thomas Thrainer
      mods.append((idx, new_id, changes))
827 763ad5be Thomas Thrainer
828 763ad5be Thomas Thrainer
    # now that we have passed all asserts above, we can apply the mods
829 763ad5be Thomas Thrainer
    # in a single run (to avoid partial changes)
830 763ad5be Thomas Thrainer
    for idx, new_id, changes in mods:
831 d0d7d7cf Thomas Thrainer
      disk = self.instance.disks[idx]
832 763ad5be Thomas Thrainer
      if new_id is not None:
833 cd3b4ff4 Helga Velroyen
        assert disk.dev_type == constants.DT_DRBD8
834 763ad5be Thomas Thrainer
        disk.logical_id = new_id
835 763ad5be Thomas Thrainer
      if changes:
836 763ad5be Thomas Thrainer
        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
837 b54ecf12 Bernardo Dal Seno
                    mode=changes.get(constants.IDISK_MODE, None),
838 b54ecf12 Bernardo Dal Seno
                    spindles=changes.get(constants.IDISK_SPINDLES, None))
839 763ad5be Thomas Thrainer
840 763ad5be Thomas Thrainer
    # change primary node, if needed
841 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
842 d0d7d7cf Thomas Thrainer
      self.instance.primary_node = self.op.node_uuids[0]
843 763ad5be Thomas Thrainer
      self.LogWarning("Changing the instance's nodes, you will have to"
844 763ad5be Thomas Thrainer
                      " remove any disks left on the older nodes manually")
845 763ad5be Thomas Thrainer
846 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
847 d0d7d7cf Thomas Thrainer
      self.cfg.Update(self.instance, feedback_fn)
848 763ad5be Thomas Thrainer
849 763ad5be Thomas Thrainer
    # All touched nodes must be locked
850 763ad5be Thomas Thrainer
    mylocks = self.owned_locks(locking.LEVEL_NODE)
851 d0d7d7cf Thomas Thrainer
    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
852 d0d7d7cf Thomas Thrainer
    new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
853 a365b47f Bernardo Dal Seno
854 a365b47f Bernardo Dal Seno
    # TODO: Release node locks before wiping, or explain why it's not possible
855 a365b47f Bernardo Dal Seno
    if self.cfg.GetClusterInfo().prealloc_wipe_disks:
856 a365b47f Bernardo Dal Seno
      wipedisks = [(idx, disk, 0)
857 d0d7d7cf Thomas Thrainer
                   for (idx, disk) in enumerate(self.instance.disks)
858 a365b47f Bernardo Dal Seno
                   if idx not in to_skip]
859 d0d7d7cf Thomas Thrainer
      WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
860 d0d7d7cf Thomas Thrainer
                         cleanup=new_disks)
861 763ad5be Thomas Thrainer
862 763ad5be Thomas Thrainer
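# A minimal, self-contained sketch of the logical_id rewrite performed in
# Exec() above when an instance is recreated on new nodes: the old DRBD
# port and shared secret are kept, while the node UUIDs and minors are
# swapped.  The helper and its inputs are illustrative only; in the code
# above the new minors come from self.cfg.AllocateDRBDMinor.
def _example_new_drbd_logical_id(old_logical_id, new_node_uuids, new_minors):
  """Return a new 6-tuple DRBD logical_id for the given nodes and minors."""
  assert len(old_logical_id) == 6
  (_, _, old_port, _, _, old_secret) = old_logical_id
  return (new_node_uuids[0], new_node_uuids[1], old_port,
          new_minors[0], new_minors[1], old_secret)

# For example, moving a disk from nodes "A"/"B" to "C"/"D" with new minors
# 1 and 2 keeps the port and secret:
#   _example_new_drbd_logical_id(("A", "B", 11000, 0, 0, "s3cr3t"),
#                                ["C", "D"], [1, 2])
#   -> ("C", "D", 11000, 1, 2, "s3cr3t")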
863 d90f0cb4 Helga Velroyen
def _PerformNodeInfoCall(lu, node_uuids, vg):
864 d90f0cb4 Helga Velroyen
  """Prepares the input and performs a node info call.
865 d90f0cb4 Helga Velroyen

866 d90f0cb4 Helga Velroyen
  @type lu: C{LogicalUnit}
867 d90f0cb4 Helga Velroyen
  @param lu: a logical unit from which we get configuration data
868 d90f0cb4 Helga Velroyen
  @type node_uuids: list of string
869 d90f0cb4 Helga Velroyen
  @param node_uuids: list of node UUIDs to perform the call for
870 d90f0cb4 Helga Velroyen
  @type vg: string
871 d90f0cb4 Helga Velroyen
  @param vg: the volume group's name
872 d90f0cb4 Helga Velroyen

873 d90f0cb4 Helga Velroyen
  """
874 d90f0cb4 Helga Velroyen
  lvm_storage_units = [(constants.ST_LVM_VG, vg)]
875 d90f0cb4 Helga Velroyen
  storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
876 d90f0cb4 Helga Velroyen
                                                  node_uuids)
877 d90f0cb4 Helga Velroyen
  hvname = lu.cfg.GetHypervisorType()
878 d90f0cb4 Helga Velroyen
  hvparams = lu.cfg.GetClusterInfo().hvparams
879 d90f0cb4 Helga Velroyen
  nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
880 d90f0cb4 Helga Velroyen
                                   [(hvname, hvparams[hvname])])
881 d90f0cb4 Helga Velroyen
  return nodeinfo
882 d90f0cb4 Helga Velroyen
883 d90f0cb4 Helga Velroyen
884 d90f0cb4 Helga Velroyen
def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
885 d90f0cb4 Helga Velroyen
  """Checks the vg capacity for a given node.
886 d90f0cb4 Helga Velroyen

887 d90f0cb4 Helga Velroyen
  @type node_info: tuple (_, list of dicts, _)
888 d90f0cb4 Helga Velroyen
  @param node_info: the result of the node info call for one node
889 d90f0cb4 Helga Velroyen
  @type node_name: string
890 d90f0cb4 Helga Velroyen
  @param node_name: the name of the node
891 d90f0cb4 Helga Velroyen
  @type vg: string
892 d90f0cb4 Helga Velroyen
  @param vg: volume group name
893 d90f0cb4 Helga Velroyen
  @type requested: int
894 d90f0cb4 Helga Velroyen
  @param requested: the amount of disk in MiB to check for
895 d90f0cb4 Helga Velroyen
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
896 d90f0cb4 Helga Velroyen
      or we cannot check the node
897 d90f0cb4 Helga Velroyen

898 d90f0cb4 Helga Velroyen
  """
899 d90f0cb4 Helga Velroyen
  (_, space_info, _) = node_info
900 d90f0cb4 Helga Velroyen
  lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
901 d90f0cb4 Helga Velroyen
      space_info, constants.ST_LVM_VG)
902 d90f0cb4 Helga Velroyen
  if not lvm_vg_info:
903 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Can't retrieve storage information for LVM")
904 d90f0cb4 Helga Velroyen
  vg_free = lvm_vg_info.get("storage_free", None)
905 d90f0cb4 Helga Velroyen
  if not isinstance(vg_free, int):
906 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Can't compute free disk space on node"
907 d90f0cb4 Helga Velroyen
                               " %s for vg %s, result was '%s'" %
908 d90f0cb4 Helga Velroyen
                               (node_name, vg, vg_free), errors.ECODE_ENVIRON)
909 d90f0cb4 Helga Velroyen
  if requested > vg_free:
910 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Not enough disk space on target node %s"
911 d90f0cb4 Helga Velroyen
                               " vg %s: required %d MiB, available %d MiB" %
912 d90f0cb4 Helga Velroyen
                               (node_name, vg, requested, vg_free),
913 d90f0cb4 Helga Velroyen
                               errors.ECODE_NORES)
914 d90f0cb4 Helga Velroyen
915 d90f0cb4 Helga Velroyen
916 1c3231aa Thomas Thrainer
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
917 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in the specified VG.
918 763ad5be Thomas Thrainer

919 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
920 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
921 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
922 763ad5be Thomas Thrainer
  exception.
923 763ad5be Thomas Thrainer

924 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
925 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
926 1c3231aa Thomas Thrainer
  @type node_uuids: C{list}
927 1c3231aa Thomas Thrainer
  @param node_uuids: the list of node UUIDs to check
928 763ad5be Thomas Thrainer
  @type vg: C{str}
929 763ad5be Thomas Thrainer
  @param vg: the volume group to check
930 763ad5be Thomas Thrainer
  @type requested: C{int}
931 763ad5be Thomas Thrainer
  @param requested: the amount of disk in MiB to check for
932 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
933 763ad5be Thomas Thrainer
      or we cannot check the node
934 763ad5be Thomas Thrainer

935 763ad5be Thomas Thrainer
  """
936 d90f0cb4 Helga Velroyen
  nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
937 1c3231aa Thomas Thrainer
  for node in node_uuids:
938 1c3231aa Thomas Thrainer
    node_name = lu.cfg.GetNodeName(node)
939 763ad5be Thomas Thrainer
    info = nodeinfo[node]
940 1c3231aa Thomas Thrainer
    info.Raise("Cannot get current information from node %s" % node_name,
941 763ad5be Thomas Thrainer
               prereq=True, ecode=errors.ECODE_ENVIRON)
942 d90f0cb4 Helga Velroyen
    _CheckVgCapacityForNode(node_name, info.payload, vg, requested)
943 763ad5be Thomas Thrainer
944 763ad5be Thomas Thrainer
945 1c3231aa Thomas Thrainer
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
946 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in all the VGs.
947 763ad5be Thomas Thrainer

948 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
949 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
950 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
951 763ad5be Thomas Thrainer
  exception.
952 763ad5be Thomas Thrainer

953 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
954 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
955 1c3231aa Thomas Thrainer
  @type node_uuids: C{list}
956 1c3231aa Thomas Thrainer
  @param node_uuids: the list of node UUIDs to check
957 763ad5be Thomas Thrainer
  @type req_sizes: C{dict}
958 763ad5be Thomas Thrainer
  @param req_sizes: the hash of vg and corresponding amount of disk in
959 763ad5be Thomas Thrainer
      MiB to check for
960 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
961 763ad5be Thomas Thrainer
      or we cannot check the node
962 763ad5be Thomas Thrainer

963 763ad5be Thomas Thrainer
  """
964 763ad5be Thomas Thrainer
  for vg, req_size in req_sizes.items():
965 1c3231aa Thomas Thrainer
    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
966 763ad5be Thomas Thrainer
967 763ad5be Thomas Thrainer
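# A minimal sketch of the check that CheckNodesFreeDiskPerVG boils down to:
# req_sizes maps a VG name to an amount in MiB, and the "storage_free" each
# node reports for that VG must cover the request.  The flat arguments used
# here are a simplified stand-in for the real node-info RPC payload that is
# unpacked above.
def _example_check_vg_free(node_name, vg, vg_free_mib, requested_mib):
  """Raise ValueError if the node cannot provide requested_mib in vg."""
  if not isinstance(vg_free_mib, int):
    raise ValueError("Can't compute free disk space on node %s for vg %s" %
                     (node_name, vg))
  if requested_mib > vg_free_mib:
    raise ValueError("Not enough disk space on node %s, vg %s:"
                     " required %d MiB, available %d MiB" %
                     (node_name, vg, requested_mib, vg_free_mib))

# A request of req_sizes = {"xenvg": 10240} (10 GiB in a VG named xenvg)
# would thus be checked as _example_check_vg_free(name, "xenvg", free, 10240)
# for every node involved.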
968 763ad5be Thomas Thrainer
def _DiskSizeInBytesToMebibytes(lu, size):
969 763ad5be Thomas Thrainer
  """Converts a disk size in bytes to mebibytes.
970 763ad5be Thomas Thrainer

971 763ad5be Thomas Thrainer
  Warns and rounds up if the size isn't an even multiple of 1 MiB.
972 763ad5be Thomas Thrainer

973 763ad5be Thomas Thrainer
  """
974 763ad5be Thomas Thrainer
  (mib, remainder) = divmod(size, 1024 * 1024)
975 763ad5be Thomas Thrainer
976 763ad5be Thomas Thrainer
  if remainder != 0:
977 763ad5be Thomas Thrainer
    lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
978 763ad5be Thomas Thrainer
                  " to not overwrite existing data (%s bytes will not be"
979 763ad5be Thomas Thrainer
                  " wiped)", (1024 * 1024) - remainder)
980 763ad5be Thomas Thrainer
    mib += 1
981 763ad5be Thomas Thrainer
982 763ad5be Thomas Thrainer
  return mib
983 763ad5be Thomas Thrainer
984 763ad5be Thomas Thrainer
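# Worked example for the rounding above: sizes are rounded *up*, so a later
# wipe never stops short of existing data.
#   divmod(10 * 1024 * 1024, 1024 * 1024)     == (10, 0)  -> 10 MiB
#   divmod(10 * 1024 * 1024 + 1, 1024 * 1024) == (10, 1)  -> 11 MiB, with a
#   warning that 1048575 bytes will not be wiped.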
985 763ad5be Thomas Thrainer
def _CalcEta(time_taken, written, total_size):
986 763ad5be Thomas Thrainer
  """Calculates the ETA based on size written and total size.
987 763ad5be Thomas Thrainer

988 763ad5be Thomas Thrainer
  @param time_taken: The time taken so far
989 763ad5be Thomas Thrainer
  @param written: amount written so far
990 763ad5be Thomas Thrainer
  @param total_size: The total size of data to be written
991 763ad5be Thomas Thrainer
  @return: The remaining time in seconds
992 763ad5be Thomas Thrainer

993 763ad5be Thomas Thrainer
  """
994 763ad5be Thomas Thrainer
  avg_time = time_taken / float(written)
995 763ad5be Thomas Thrainer
  return (total_size - written) * avg_time
996 763ad5be Thomas Thrainer
997 763ad5be Thomas Thrainer
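# Worked example: _CalcEta simply extrapolates the average speed observed so
# far.  If 512 MiB of a 2048 MiB wipe took 30 seconds:
#   avg_time = 30 / 512.0            # ~0.0586 s per MiB
#   eta = (2048 - 512) * avg_time    # = 90.0 seconds remaining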
998 5eacbcae Thomas Thrainer
def WipeDisks(lu, instance, disks=None):
999 763ad5be Thomas Thrainer
  """Wipes instance disks.
1000 763ad5be Thomas Thrainer

1001 763ad5be Thomas Thrainer
  @type lu: L{LogicalUnit}
1002 763ad5be Thomas Thrainer
  @param lu: the logical unit on whose behalf we execute
1003 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
1004 763ad5be Thomas Thrainer
  @param instance: the instance whose disks we should wipe
1005 763ad5be Thomas Thrainer
  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
1006 763ad5be Thomas Thrainer
  @param disks: Disk details; tuple contains disk index, disk object and the
1007 763ad5be Thomas Thrainer
    start offset
1008 763ad5be Thomas Thrainer

1009 763ad5be Thomas Thrainer
  """
1010 1c3231aa Thomas Thrainer
  node_uuid = instance.primary_node
1011 1c3231aa Thomas Thrainer
  node_name = lu.cfg.GetNodeName(node_uuid)
1012 763ad5be Thomas Thrainer
1013 763ad5be Thomas Thrainer
  if disks is None:
1014 763ad5be Thomas Thrainer
    disks = [(idx, disk, 0)
1015 763ad5be Thomas Thrainer
             for (idx, disk) in enumerate(instance.disks)]
1016 763ad5be Thomas Thrainer
1017 763ad5be Thomas Thrainer
  for (_, device, _) in disks:
1018 1c3231aa Thomas Thrainer
    lu.cfg.SetDiskID(device, node_uuid)
1019 763ad5be Thomas Thrainer
1020 763ad5be Thomas Thrainer
  logging.info("Pausing synchronization of disks of instance '%s'",
1021 763ad5be Thomas Thrainer
               instance.name)
1022 1c3231aa Thomas Thrainer
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
1023 763ad5be Thomas Thrainer
                                                  (map(compat.snd, disks),
1024 763ad5be Thomas Thrainer
                                                   instance),
1025 763ad5be Thomas Thrainer
                                                  True)
1026 1c3231aa Thomas Thrainer
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
1027 763ad5be Thomas Thrainer
1028 763ad5be Thomas Thrainer
  for idx, success in enumerate(result.payload):
1029 763ad5be Thomas Thrainer
    if not success:
1030 763ad5be Thomas Thrainer
      logging.warn("Pausing synchronization of disk %s of instance '%s'"
1031 763ad5be Thomas Thrainer
                   " failed", idx, instance.name)
1032 763ad5be Thomas Thrainer
1033 763ad5be Thomas Thrainer
  try:
1034 763ad5be Thomas Thrainer
    for (idx, device, offset) in disks:
1035 763ad5be Thomas Thrainer
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
1036 763ad5be Thomas Thrainer
      # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
1037 763ad5be Thomas Thrainer
      wipe_chunk_size = \
1038 763ad5be Thomas Thrainer
        int(min(constants.MAX_WIPE_CHUNK,
1039 763ad5be Thomas Thrainer
                device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))
1040 763ad5be Thomas Thrainer
1041 763ad5be Thomas Thrainer
      size = device.size
1042 763ad5be Thomas Thrainer
      last_output = 0
1043 763ad5be Thomas Thrainer
      start_time = time.time()
1044 763ad5be Thomas Thrainer
1045 763ad5be Thomas Thrainer
      if offset == 0:
1046 763ad5be Thomas Thrainer
        info_text = ""
1047 763ad5be Thomas Thrainer
      else:
1048 763ad5be Thomas Thrainer
        info_text = (" (from %s to %s)" %
1049 763ad5be Thomas Thrainer
                     (utils.FormatUnit(offset, "h"),
1050 763ad5be Thomas Thrainer
                      utils.FormatUnit(size, "h")))
1051 763ad5be Thomas Thrainer
1052 763ad5be Thomas Thrainer
      lu.LogInfo("* Wiping disk %s%s", idx, info_text)
1053 763ad5be Thomas Thrainer
1054 763ad5be Thomas Thrainer
      logging.info("Wiping disk %d for instance %s on node %s using"
1055 1c3231aa Thomas Thrainer
                   " chunk size %s", idx, instance.name, node_name,
1056 1c3231aa Thomas Thrainer
                   wipe_chunk_size)
1057 763ad5be Thomas Thrainer
1058 763ad5be Thomas Thrainer
      while offset < size:
1059 763ad5be Thomas Thrainer
        wipe_size = min(wipe_chunk_size, size - offset)
1060 763ad5be Thomas Thrainer
1061 763ad5be Thomas Thrainer
        logging.debug("Wiping disk %d, offset %s, chunk %s",
1062 763ad5be Thomas Thrainer
                      idx, offset, wipe_size)
1063 763ad5be Thomas Thrainer
1064 1c3231aa Thomas Thrainer
        result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
1065 1c3231aa Thomas Thrainer
                                           offset, wipe_size)
1066 763ad5be Thomas Thrainer
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
1067 763ad5be Thomas Thrainer
                     (idx, offset, wipe_size))
1068 763ad5be Thomas Thrainer
1069 763ad5be Thomas Thrainer
        now = time.time()
1070 763ad5be Thomas Thrainer
        offset += wipe_size
1071 763ad5be Thomas Thrainer
        if now - last_output >= 60:
1072 763ad5be Thomas Thrainer
          eta = _CalcEta(now - start_time, offset, size)
1073 763ad5be Thomas Thrainer
          lu.LogInfo(" - done: %.1f%% ETA: %s",
1074 763ad5be Thomas Thrainer
                     offset / float(size) * 100, utils.FormatSeconds(eta))
1075 763ad5be Thomas Thrainer
          last_output = now
1076 763ad5be Thomas Thrainer
  finally:
1077 763ad5be Thomas Thrainer
    logging.info("Resuming synchronization of disks for instance '%s'",
1078 763ad5be Thomas Thrainer
                 instance.name)
1079 763ad5be Thomas Thrainer
1080 1c3231aa Thomas Thrainer
    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
1081 763ad5be Thomas Thrainer
                                                    (map(compat.snd, disks),
1082 763ad5be Thomas Thrainer
                                                     instance),
1083 763ad5be Thomas Thrainer
                                                    False)
1084 763ad5be Thomas Thrainer
1085 763ad5be Thomas Thrainer
    if result.fail_msg:
1086 763ad5be Thomas Thrainer
      lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
1087 1c3231aa Thomas Thrainer
                    node_name, result.fail_msg)
1088 763ad5be Thomas Thrainer
    else:
1089 763ad5be Thomas Thrainer
      for idx, success in enumerate(result.payload):
1090 763ad5be Thomas Thrainer
        if not success:
1091 763ad5be Thomas Thrainer
          lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
1092 763ad5be Thomas Thrainer
                        " failed", idx, instance.name)
1093 763ad5be Thomas Thrainer
1094 763ad5be Thomas Thrainer
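# A minimal sketch of the chunk-size rule used by WipeDisks above: wipe in
# chunks of MIN_WIPE_CHUNK_PERCENT of the disk size, but never more than
# MAX_WIPE_CHUNK MiB at a time.  The values used as defaults here (10% and
# 1024 MiB) are assumptions and should be checked against constants.py.
def _example_wipe_chunk_size(disk_size_mib, max_wipe_chunk=1024,
                             min_wipe_chunk_percent=10):
  """Return the wipe chunk size in MiB for a disk of the given size."""
  return int(min(max_wipe_chunk,
                 disk_size_mib / 100.0 * min_wipe_chunk_percent))

# _example_wipe_chunk_size(2048)  -> 204   (10% of a 2 GiB disk)
# _example_wipe_chunk_size(20480) -> 1024  (capped at MAX_WIPE_CHUNK)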
1095 a365b47f Bernardo Dal Seno
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
1096 a365b47f Bernardo Dal Seno
  """Wrapper for L{WipeDisks} that handles errors.
1097 a365b47f Bernardo Dal Seno

1098 a365b47f Bernardo Dal Seno
  @type lu: L{LogicalUnit}
1099 a365b47f Bernardo Dal Seno
  @param lu: the logical unit on whose behalf we execute
1100 a365b47f Bernardo Dal Seno
  @type instance: L{objects.Instance}
1101 a365b47f Bernardo Dal Seno
  @param instance: the instance whose disks we should wipe
1102 a365b47f Bernardo Dal Seno
  @param disks: see L{WipeDisks}
1103 a365b47f Bernardo Dal Seno
  @param cleanup: the result returned by L{CreateDisks}, used for cleanup in
1104 a365b47f Bernardo Dal Seno
      case of error
1105 a365b47f Bernardo Dal Seno
  @raise errors.OpExecError: in case of failure
1106 a365b47f Bernardo Dal Seno

1107 a365b47f Bernardo Dal Seno
  """
1108 a365b47f Bernardo Dal Seno
  try:
1109 a365b47f Bernardo Dal Seno
    WipeDisks(lu, instance, disks=disks)
1110 a365b47f Bernardo Dal Seno
  except errors.OpExecError:
1111 a365b47f Bernardo Dal Seno
    logging.warning("Wiping disks for instance '%s' failed",
1112 a365b47f Bernardo Dal Seno
                    instance.name)
1113 a365b47f Bernardo Dal Seno
    _UndoCreateDisks(lu, cleanup)
1114 a365b47f Bernardo Dal Seno
    raise
1115 a365b47f Bernardo Dal Seno
1116 a365b47f Bernardo Dal Seno
1117 5eacbcae Thomas Thrainer
def ExpandCheckDisks(instance, disks):
1118 763ad5be Thomas Thrainer
  """Return the instance disks selected by the disks list
1119 763ad5be Thomas Thrainer

1120 763ad5be Thomas Thrainer
  @type disks: list of L{objects.Disk} or None
1121 763ad5be Thomas Thrainer
  @param disks: selected disks
1122 763ad5be Thomas Thrainer
  @rtype: list of L{objects.Disk}
1123 763ad5be Thomas Thrainer
  @return: selected instance disks to act on
1124 763ad5be Thomas Thrainer

1125 763ad5be Thomas Thrainer
  """
1126 763ad5be Thomas Thrainer
  if disks is None:
1127 763ad5be Thomas Thrainer
    return instance.disks
1128 763ad5be Thomas Thrainer
  else:
1129 763ad5be Thomas Thrainer
    if not set(disks).issubset(instance.disks):
1130 763ad5be Thomas Thrainer
      raise errors.ProgrammerError("Can only act on disks belonging to the"
1131 328201a5 Guido Trotter
                                   " target instance: expected a subset of %r,"
1132 328201a5 Guido Trotter
                                   " got %r" % (instance.disks, disks))
1133 763ad5be Thomas Thrainer
    return disks
1134 763ad5be Thomas Thrainer
1135 763ad5be Thomas Thrainer
1136 5eacbcae Thomas Thrainer
def WaitForSync(lu, instance, disks=None, oneshot=False):
1137 763ad5be Thomas Thrainer
  """Sleep and poll for an instance's disk to sync.
1138 763ad5be Thomas Thrainer

1139 763ad5be Thomas Thrainer
  """
1140 763ad5be Thomas Thrainer
  if not instance.disks or (disks is not None and not disks):
1141 763ad5be Thomas Thrainer
    return True
1142 763ad5be Thomas Thrainer
1143 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1144 763ad5be Thomas Thrainer
1145 763ad5be Thomas Thrainer
  if not oneshot:
1146 763ad5be Thomas Thrainer
    lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
1147 763ad5be Thomas Thrainer
1148 1c3231aa Thomas Thrainer
  node_uuid = instance.primary_node
1149 1c3231aa Thomas Thrainer
  node_name = lu.cfg.GetNodeName(node_uuid)
1150 763ad5be Thomas Thrainer
1151 763ad5be Thomas Thrainer
  for dev in disks:
1152 1c3231aa Thomas Thrainer
    lu.cfg.SetDiskID(dev, node_uuid)
1153 763ad5be Thomas Thrainer
1154 763ad5be Thomas Thrainer
  # TODO: Convert to utils.Retry
1155 763ad5be Thomas Thrainer
1156 763ad5be Thomas Thrainer
  retries = 0
1157 763ad5be Thomas Thrainer
  degr_retries = 10 # in seconds, as we sleep 1 second each time
1158 763ad5be Thomas Thrainer
  while True:
1159 763ad5be Thomas Thrainer
    max_time = 0
1160 763ad5be Thomas Thrainer
    done = True
1161 763ad5be Thomas Thrainer
    cumul_degraded = False
1162 1c3231aa Thomas Thrainer
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
1163 763ad5be Thomas Thrainer
    msg = rstats.fail_msg
1164 763ad5be Thomas Thrainer
    if msg:
1165 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
1166 763ad5be Thomas Thrainer
      retries += 1
1167 763ad5be Thomas Thrainer
      if retries >= 10:
1168 763ad5be Thomas Thrainer
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1169 1c3231aa Thomas Thrainer
                                 " aborting." % node_name)
1170 763ad5be Thomas Thrainer
      time.sleep(6)
1171 763ad5be Thomas Thrainer
      continue
1172 763ad5be Thomas Thrainer
    rstats = rstats.payload
1173 763ad5be Thomas Thrainer
    retries = 0
1174 763ad5be Thomas Thrainer
    for i, mstat in enumerate(rstats):
1175 763ad5be Thomas Thrainer
      if mstat is None:
1176 763ad5be Thomas Thrainer
        lu.LogWarning("Can't compute data for node %s/%s",
1177 1c3231aa Thomas Thrainer
                      node_name, disks[i].iv_name)
1178 763ad5be Thomas Thrainer
        continue
1179 763ad5be Thomas Thrainer
1180 763ad5be Thomas Thrainer
      cumul_degraded = (cumul_degraded or
1181 763ad5be Thomas Thrainer
                        (mstat.is_degraded and mstat.sync_percent is None))
1182 763ad5be Thomas Thrainer
      if mstat.sync_percent is not None:
1183 763ad5be Thomas Thrainer
        done = False
1184 763ad5be Thomas Thrainer
        if mstat.estimated_time is not None:
1185 763ad5be Thomas Thrainer
          rem_time = ("%s remaining (estimated)" %
1186 763ad5be Thomas Thrainer
                      utils.FormatSeconds(mstat.estimated_time))
1187 763ad5be Thomas Thrainer
          max_time = mstat.estimated_time
1188 763ad5be Thomas Thrainer
        else:
1189 763ad5be Thomas Thrainer
          rem_time = "no time estimate"
1190 763ad5be Thomas Thrainer
        lu.LogInfo("- device %s: %5.2f%% done, %s",
1191 763ad5be Thomas Thrainer
                   disks[i].iv_name, mstat.sync_percent, rem_time)
1192 763ad5be Thomas Thrainer
1193 763ad5be Thomas Thrainer
    # if we're done but degraded, let's do a few small retries, to
1194 763ad5be Thomas Thrainer
    # make sure we see a stable and not transient situation; therefore
1195 763ad5be Thomas Thrainer
    # we force restart of the loop
1196 763ad5be Thomas Thrainer
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
1197 763ad5be Thomas Thrainer
      logging.info("Degraded disks found, %d retries left", degr_retries)
1198 763ad5be Thomas Thrainer
      degr_retries -= 1
1199 763ad5be Thomas Thrainer
      time.sleep(1)
1200 763ad5be Thomas Thrainer
      continue
1201 763ad5be Thomas Thrainer
1202 763ad5be Thomas Thrainer
    if done or oneshot:
1203 763ad5be Thomas Thrainer
      break
1204 763ad5be Thomas Thrainer
1205 763ad5be Thomas Thrainer
    time.sleep(min(60, max_time))
1206 763ad5be Thomas Thrainer
1207 763ad5be Thomas Thrainer
  if done:
1208 763ad5be Thomas Thrainer
    lu.LogInfo("Instance %s's disks are in sync", instance.name)
1209 763ad5be Thomas Thrainer
1210 763ad5be Thomas Thrainer
  return not cumul_degraded
1211 763ad5be Thomas Thrainer
1212 763ad5be Thomas Thrainer
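# A minimal sketch of the confirmation-retry pattern used by WaitForSync
# above: a result that is "done but degraded" is re-sampled a few times
# before being believed, so a transiently degraded mirror does not abort the
# operation.  poll_fn is an illustrative stand-in for the getmirrorstatus
# RPC; the real loop additionally retries failed RPCs and logs progress.
def _example_wait_stable(poll_fn, confirm_retries=10, interval=1):
  """Return True once poll_fn() reports done and not degraded."""
  while True:
    (done, degraded) = poll_fn()
    if done and degraded and confirm_retries > 0:
      confirm_retries -= 1
      time.sleep(interval)
      continue
    if done:
      return not degraded
    time.sleep(interval)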
1213 5eacbcae Thomas Thrainer
def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
1214 763ad5be Thomas Thrainer
  """Shutdown block devices of an instance.
1215 763ad5be Thomas Thrainer

1216 763ad5be Thomas Thrainer
  This does the shutdown on all nodes of the instance.
1217 763ad5be Thomas Thrainer

1218 763ad5be Thomas Thrainer
  If ignore_primary is false, errors on the primary node are not
1219 763ad5be Thomas Thrainer
  ignored and cause the whole shutdown to be reported as failed.
1220 763ad5be Thomas Thrainer

1221 763ad5be Thomas Thrainer
  """
1222 da4a52a3 Thomas Thrainer
  lu.cfg.MarkInstanceDisksInactive(instance.uuid)
1223 763ad5be Thomas Thrainer
  all_result = True
1224 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1225 763ad5be Thomas Thrainer
1226 763ad5be Thomas Thrainer
  for disk in disks:
1227 1c3231aa Thomas Thrainer
    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
1228 1c3231aa Thomas Thrainer
      lu.cfg.SetDiskID(top_disk, node_uuid)
1229 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
1230 763ad5be Thomas Thrainer
      msg = result.fail_msg
1231 763ad5be Thomas Thrainer
      if msg:
1232 763ad5be Thomas Thrainer
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
1233 1c3231aa Thomas Thrainer
                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1234 1c3231aa Thomas Thrainer
        if ((node_uuid == instance.primary_node and not ignore_primary) or
1235 1c3231aa Thomas Thrainer
            (node_uuid != instance.primary_node and not result.offline)):
1236 763ad5be Thomas Thrainer
          all_result = False
1237 763ad5be Thomas Thrainer
  return all_result
1238 763ad5be Thomas Thrainer
1239 763ad5be Thomas Thrainer
1240 763ad5be Thomas Thrainer
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
1241 763ad5be Thomas Thrainer
  """Shutdown block devices of an instance.
1242 763ad5be Thomas Thrainer

1243 763ad5be Thomas Thrainer
  This function checks if an instance is running, before calling
1244 763ad5be Thomas Thrainer
  _ShutdownInstanceDisks.
1245 763ad5be Thomas Thrainer

1246 763ad5be Thomas Thrainer
  """
1247 5eacbcae Thomas Thrainer
  CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
1248 5eacbcae Thomas Thrainer
  ShutdownInstanceDisks(lu, instance, disks=disks)
1249 763ad5be Thomas Thrainer
1250 763ad5be Thomas Thrainer
1251 5eacbcae Thomas Thrainer
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
1252 763ad5be Thomas Thrainer
                           ignore_size=False):
1253 763ad5be Thomas Thrainer
  """Prepare the block devices for an instance.
1254 763ad5be Thomas Thrainer

1255 763ad5be Thomas Thrainer
  This sets up the block devices on all nodes.
1256 763ad5be Thomas Thrainer

1257 763ad5be Thomas Thrainer
  @type lu: L{LogicalUnit}
1258 763ad5be Thomas Thrainer
  @param lu: the logical unit on whose behalf we execute
1259 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
1260 763ad5be Thomas Thrainer
  @param instance: the instance for whose disks we assemble
1261 763ad5be Thomas Thrainer
  @type disks: list of L{objects.Disk} or None
1262 763ad5be Thomas Thrainer
  @param disks: which disks to assemble (or all, if None)
1263 763ad5be Thomas Thrainer
  @type ignore_secondaries: boolean
1264 763ad5be Thomas Thrainer
  @param ignore_secondaries: if true, errors on secondary nodes
1265 763ad5be Thomas Thrainer
      won't result in an error return from the function
1266 763ad5be Thomas Thrainer
  @type ignore_size: boolean
1267 763ad5be Thomas Thrainer
  @param ignore_size: if true, the current known size of the disk
1268 763ad5be Thomas Thrainer
      will not be used during the disk activation, useful for cases
1269 763ad5be Thomas Thrainer
      when the size is wrong
1270 763ad5be Thomas Thrainer
  @return: False if the operation failed, otherwise a list of
1271 763ad5be Thomas Thrainer
      (host, instance_visible_name, node_visible_name)
1272 763ad5be Thomas Thrainer
      with the mapping from node devices to instance devices
1273 763ad5be Thomas Thrainer

1274 763ad5be Thomas Thrainer
  """
1275 763ad5be Thomas Thrainer
  device_info = []
1276 763ad5be Thomas Thrainer
  disks_ok = True
1277 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1278 763ad5be Thomas Thrainer
1279 763ad5be Thomas Thrainer
  # With the two passes mechanism we try to reduce the window of
1280 763ad5be Thomas Thrainer
  # opportunity for the race condition of switching DRBD to primary
1281 763ad5be Thomas Thrainer
  # before handshaking occured, but we do not eliminate it
1282 763ad5be Thomas Thrainer
1283 763ad5be Thomas Thrainer
  # The proper fix would be to wait (with some limits) until the
1284 763ad5be Thomas Thrainer
  # connection has been made and drbd transitions from WFConnection
1285 763ad5be Thomas Thrainer
  # into any other network-connected state (Connected, SyncTarget,
1286 763ad5be Thomas Thrainer
  # SyncSource, etc.)
1287 763ad5be Thomas Thrainer
1288 1d4a4b26 Thomas Thrainer
  # mark instance disks as active before doing actual work, so watcher does
1289 1d4a4b26 Thomas Thrainer
  # not try to shut them down erroneously
1290 da4a52a3 Thomas Thrainer
  lu.cfg.MarkInstanceDisksActive(instance.uuid)
1291 1d4a4b26 Thomas Thrainer
1292 763ad5be Thomas Thrainer
  # 1st pass, assemble on all nodes in secondary mode
1293 763ad5be Thomas Thrainer
  for idx, inst_disk in enumerate(disks):
1294 1c3231aa Thomas Thrainer
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
1295 1c3231aa Thomas Thrainer
                                  instance.primary_node):
1296 763ad5be Thomas Thrainer
      if ignore_size:
1297 763ad5be Thomas Thrainer
        node_disk = node_disk.Copy()
1298 763ad5be Thomas Thrainer
        node_disk.UnsetSize()
1299 1c3231aa Thomas Thrainer
      lu.cfg.SetDiskID(node_disk, node_uuid)
1300 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
1301 da4a52a3 Thomas Thrainer
                                             instance.name, False, idx)
1302 763ad5be Thomas Thrainer
      msg = result.fail_msg
1303 763ad5be Thomas Thrainer
      if msg:
1304 1c3231aa Thomas Thrainer
        is_offline_secondary = (node_uuid in instance.secondary_nodes and
1305 763ad5be Thomas Thrainer
                                result.offline)
1306 763ad5be Thomas Thrainer
        lu.LogWarning("Could not prepare block device %s on node %s"
1307 763ad5be Thomas Thrainer
                      " (is_primary=False, pass=1): %s",
1308 1c3231aa Thomas Thrainer
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1309 763ad5be Thomas Thrainer
        if not (ignore_secondaries or is_offline_secondary):
1310 763ad5be Thomas Thrainer
          disks_ok = False
1311 763ad5be Thomas Thrainer
1312 763ad5be Thomas Thrainer
  # FIXME: race condition on drbd migration to primary
1313 763ad5be Thomas Thrainer
1314 763ad5be Thomas Thrainer
  # 2nd pass, do only the primary node
1315 763ad5be Thomas Thrainer
  for idx, inst_disk in enumerate(disks):
1316 763ad5be Thomas Thrainer
    dev_path = None
1317 763ad5be Thomas Thrainer
1318 1c3231aa Thomas Thrainer
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
1319 1c3231aa Thomas Thrainer
                                  instance.primary_node):
1320 1c3231aa Thomas Thrainer
      if node_uuid != instance.primary_node:
1321 763ad5be Thomas Thrainer
        continue
1322 763ad5be Thomas Thrainer
      if ignore_size:
1323 763ad5be Thomas Thrainer
        node_disk = node_disk.Copy()
1324 763ad5be Thomas Thrainer
        node_disk.UnsetSize()
1325 1c3231aa Thomas Thrainer
      lu.cfg.SetDiskID(node_disk, node_uuid)
1326 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
1327 da4a52a3 Thomas Thrainer
                                             instance.name, True, idx)
1328 763ad5be Thomas Thrainer
      msg = result.fail_msg
1329 763ad5be Thomas Thrainer
      if msg:
1330 763ad5be Thomas Thrainer
        lu.LogWarning("Could not prepare block device %s on node %s"
1331 763ad5be Thomas Thrainer
                      " (is_primary=True, pass=2): %s",
1332 1c3231aa Thomas Thrainer
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1333 763ad5be Thomas Thrainer
        disks_ok = False
1334 763ad5be Thomas Thrainer
      else:
1335 763ad5be Thomas Thrainer
        dev_path = result.payload
1336 763ad5be Thomas Thrainer
1337 1c3231aa Thomas Thrainer
    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
1338 1c3231aa Thomas Thrainer
                        inst_disk.iv_name, dev_path))
1339 763ad5be Thomas Thrainer
1340 763ad5be Thomas Thrainer
  # leave the disks configured for the primary node
1341 763ad5be Thomas Thrainer
  # this is a workaround that would be fixed better by
1342 763ad5be Thomas Thrainer
  # improving the logical/physical id handling
1343 763ad5be Thomas Thrainer
  for disk in disks:
1344 763ad5be Thomas Thrainer
    lu.cfg.SetDiskID(disk, instance.primary_node)
1345 763ad5be Thomas Thrainer
1346 1d4a4b26 Thomas Thrainer
  if not disks_ok:
1347 da4a52a3 Thomas Thrainer
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)
1348 1d4a4b26 Thomas Thrainer
1349 763ad5be Thomas Thrainer
  return disks_ok, device_info
1350 763ad5be Thomas Thrainer
1351 763ad5be Thomas Thrainer
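# A minimal sketch of the two-pass scheme implemented above, with the RPC
# details stripped out: every node first assembles each device in secondary
# mode, and only afterwards is the primary node asked to promote it, which
# narrows (but does not close) the DRBD handshake race described in the
# comments.  assemble_fn is an illustrative stand-in for the
# blockdev_assemble RPC.
def _example_two_pass_assemble(disks, all_node_uuids, primary_uuid,
                               assemble_fn):
  """Assemble each disk on all nodes as secondary, then on the primary."""
  ok = True
  for idx, disk in enumerate(disks):
    for node_uuid in all_node_uuids:  # pass 1: everyone in secondary mode
      if not assemble_fn(node_uuid, disk, idx, as_primary=False):
        ok = False
  for idx, disk in enumerate(disks):  # pass 2: primary node only
    if not assemble_fn(primary_uuid, disk, idx, as_primary=True):
      ok = False
  return ok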
1352 5eacbcae Thomas Thrainer
def StartInstanceDisks(lu, instance, force):
1353 763ad5be Thomas Thrainer
  """Start the disks of an instance.
1354 763ad5be Thomas Thrainer

1355 763ad5be Thomas Thrainer
  """
1356 5eacbcae Thomas Thrainer
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
1357 5eacbcae Thomas Thrainer
                                      ignore_secondaries=force)
1358 763ad5be Thomas Thrainer
  if not disks_ok:
1359 5eacbcae Thomas Thrainer
    ShutdownInstanceDisks(lu, instance)
1360 763ad5be Thomas Thrainer
    if force is not None and not force:
1361 763ad5be Thomas Thrainer
      lu.LogWarning("",
1362 763ad5be Thomas Thrainer
                    hint=("If the message above refers to a secondary node,"
1363 763ad5be Thomas Thrainer
                          " you can retry the operation using '--force'"))
1364 763ad5be Thomas Thrainer
    raise errors.OpExecError("Disk consistency error")
1365 763ad5be Thomas Thrainer
1366 763ad5be Thomas Thrainer
1367 763ad5be Thomas Thrainer
class LUInstanceGrowDisk(LogicalUnit):
1368 763ad5be Thomas Thrainer
  """Grow a disk of an instance.
1369 763ad5be Thomas Thrainer

1370 763ad5be Thomas Thrainer
  """
1371 763ad5be Thomas Thrainer
  HPATH = "disk-grow"
1372 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
1373 763ad5be Thomas Thrainer
  REQ_BGL = False
1374 763ad5be Thomas Thrainer
1375 763ad5be Thomas Thrainer
  def ExpandNames(self):
1376 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1377 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1378 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1379 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1380 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
1381 763ad5be Thomas Thrainer
1382 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1383 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1384 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1385 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
1386 763ad5be Thomas Thrainer
      # Copy node locks
1387 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1388 5eacbcae Thomas Thrainer
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1389 763ad5be Thomas Thrainer
1390 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
1391 763ad5be Thomas Thrainer
    """Build hooks env.
1392 763ad5be Thomas Thrainer

1393 763ad5be Thomas Thrainer
    This runs on the master, the primary and all the secondaries.
1394 763ad5be Thomas Thrainer

1395 763ad5be Thomas Thrainer
    """
1396 763ad5be Thomas Thrainer
    env = {
1397 763ad5be Thomas Thrainer
      "DISK": self.op.disk,
1398 763ad5be Thomas Thrainer
      "AMOUNT": self.op.amount,
1399 763ad5be Thomas Thrainer
      "ABSOLUTE": self.op.absolute,
1400 763ad5be Thomas Thrainer
      }
1401 5eacbcae Thomas Thrainer
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1402 763ad5be Thomas Thrainer
    return env
1403 763ad5be Thomas Thrainer
1404 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
1405 763ad5be Thomas Thrainer
    """Build hooks nodes.
1406 763ad5be Thomas Thrainer

1407 763ad5be Thomas Thrainer
    """
1408 763ad5be Thomas Thrainer
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1409 763ad5be Thomas Thrainer
    return (nl, nl)
1410 763ad5be Thomas Thrainer
1411 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1412 763ad5be Thomas Thrainer
    """Check prerequisites.
1413 763ad5be Thomas Thrainer

1414 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1415 763ad5be Thomas Thrainer

1416 763ad5be Thomas Thrainer
    """
1417 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1418 da4a52a3 Thomas Thrainer
    assert self.instance is not None, \
1419 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1420 da4a52a3 Thomas Thrainer
    node_uuids = list(self.instance.all_nodes)
1421 1c3231aa Thomas Thrainer
    for node_uuid in node_uuids:
1422 1c3231aa Thomas Thrainer
      CheckNodeOnline(self, node_uuid)
1423 e43a624e Bernardo Dal Seno
    self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
1424 763ad5be Thomas Thrainer
1425 da4a52a3 Thomas Thrainer
    if self.instance.disk_template not in constants.DTS_GROWABLE:
1426 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Instance's disk layout does not support"
1427 763ad5be Thomas Thrainer
                                 " growing", errors.ECODE_INVAL)
1428 763ad5be Thomas Thrainer
1429 da4a52a3 Thomas Thrainer
    self.disk = self.instance.FindDisk(self.op.disk)
1430 763ad5be Thomas Thrainer
1431 763ad5be Thomas Thrainer
    if self.op.absolute:
1432 763ad5be Thomas Thrainer
      self.target = self.op.amount
1433 763ad5be Thomas Thrainer
      self.delta = self.target - self.disk.size
1434 763ad5be Thomas Thrainer
      if self.delta < 0:
1435 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Requested size (%s) is smaller than "
1436 763ad5be Thomas Thrainer
                                   "current disk size (%s)" %
1437 763ad5be Thomas Thrainer
                                   (utils.FormatUnit(self.target, "h"),
1438 763ad5be Thomas Thrainer
                                    utils.FormatUnit(self.disk.size, "h")),
1439 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
1440 763ad5be Thomas Thrainer
    else:
1441 763ad5be Thomas Thrainer
      self.delta = self.op.amount
1442 763ad5be Thomas Thrainer
      self.target = self.disk.size + self.delta
1443 763ad5be Thomas Thrainer
      if self.delta < 0:
1444 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Requested increment (%s) is negative" %
1445 763ad5be Thomas Thrainer
                                   utils.FormatUnit(self.delta, "h"),
1446 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
1447 763ad5be Thomas Thrainer
1448 1c3231aa Thomas Thrainer
    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))
1449 763ad5be Thomas Thrainer
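  # Worked example for the arithmetic above: with self.op.absolute set, the
  # amount is the final size, so requesting 20480 MiB for a 10240 MiB disk
  # gives delta = 10240, and requesting less than the current size is
  # rejected.  Without it, the amount is the increment itself, so
  # amount = 2048 gives target = 10240 + 2048 = 12288 MiB, and a negative
  # increment is rejected.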
1450 1c3231aa Thomas Thrainer
  def _CheckDiskSpace(self, node_uuids, req_vgspace):
1451 763ad5be Thomas Thrainer
    template = self.instance.disk_template
1452 8e5a911a Bernardo Dal Seno
    if (template not in constants.DTS_NO_FREE_SPACE_CHECK and
1453 8e5a911a Bernardo Dal Seno
        not any(self.node_es_flags.values())):
1454 763ad5be Thomas Thrainer
      # TODO: check the free disk space for file, when that feature will be
1455 763ad5be Thomas Thrainer
      # supported
1456 8e5a911a Bernardo Dal Seno
      # With exclusive storage we need to do something smarter than just looking
1457 8e5a911a Bernardo Dal Seno
      # at free space, which, in the end, is basically a dry run. So we rely on
1458 8e5a911a Bernardo Dal Seno
      # the dry run performed in Exec() instead.
1459 1c3231aa Thomas Thrainer
      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
1460 763ad5be Thomas Thrainer
1461 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1462 763ad5be Thomas Thrainer
    """Execute disk grow.
1463 763ad5be Thomas Thrainer

1464 763ad5be Thomas Thrainer
    """
1465 d0d7d7cf Thomas Thrainer
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
1466 763ad5be Thomas Thrainer
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1467 763ad5be Thomas Thrainer
            self.owned_locks(locking.LEVEL_NODE_RES))
1468 763ad5be Thomas Thrainer
1469 763ad5be Thomas Thrainer
    wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
1470 763ad5be Thomas Thrainer
1471 d0d7d7cf Thomas Thrainer
    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
1472 763ad5be Thomas Thrainer
    if not disks_ok:
1473 763ad5be Thomas Thrainer
      raise errors.OpExecError("Cannot activate block device to grow")
1474 763ad5be Thomas Thrainer
1475 763ad5be Thomas Thrainer
    feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
1476 d0d7d7cf Thomas Thrainer
                (self.op.disk, self.instance.name,
1477 763ad5be Thomas Thrainer
                 utils.FormatUnit(self.delta, "h"),
1478 763ad5be Thomas Thrainer
                 utils.FormatUnit(self.target, "h")))
1479 763ad5be Thomas Thrainer
1480 763ad5be Thomas Thrainer
    # First run all grow ops in dry-run mode
1481 d0d7d7cf Thomas Thrainer
    for node_uuid in self.instance.all_nodes:
1482 d0d7d7cf Thomas Thrainer
      self.cfg.SetDiskID(self.disk, node_uuid)
1483 d0d7d7cf Thomas Thrainer
      result = self.rpc.call_blockdev_grow(node_uuid,
1484 d0d7d7cf Thomas Thrainer
                                           (self.disk, self.instance),
1485 e43a624e Bernardo Dal Seno
                                           self.delta, True, True,
1486 e43a624e Bernardo Dal Seno
                                           self.node_es_flags[node_uuid])
1487 1c3231aa Thomas Thrainer
      result.Raise("Dry-run grow request failed to node %s" %
1488 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(node_uuid))
1489 763ad5be Thomas Thrainer
1490 763ad5be Thomas Thrainer
    if wipe_disks:
1491 763ad5be Thomas Thrainer
      # Get disk size from primary node for wiping
1492 dad226e3 Thomas Thrainer
      self.cfg.SetDiskID(self.disk, self.instance.primary_node)
1493 d0d7d7cf Thomas Thrainer
      result = self.rpc.call_blockdev_getdimensions(self.instance.primary_node,
1494 d0d7d7cf Thomas Thrainer
                                                    [self.disk])
1495 763ad5be Thomas Thrainer
      result.Raise("Failed to retrieve disk size from node '%s'" %
1496 d0d7d7cf Thomas Thrainer
                   self.instance.primary_node)
1497 763ad5be Thomas Thrainer
1498 6ef8077e Bernardo Dal Seno
      (disk_dimensions, ) = result.payload
1499 763ad5be Thomas Thrainer
1500 6ef8077e Bernardo Dal Seno
      if disk_dimensions is None:
1501 763ad5be Thomas Thrainer
        raise errors.OpExecError("Failed to retrieve disk size from primary"
1502 d0d7d7cf Thomas Thrainer
                                 " node '%s'" % self.instance.primary_node)
1503 6ef8077e Bernardo Dal Seno
      (disk_size_in_bytes, _) = disk_dimensions
1504 763ad5be Thomas Thrainer
1505 763ad5be Thomas Thrainer
      old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)
1506 763ad5be Thomas Thrainer
1507 d0d7d7cf Thomas Thrainer
      assert old_disk_size >= self.disk.size, \
1508 763ad5be Thomas Thrainer
        ("Retrieved disk size too small (got %s, should be at least %s)" %
1509 d0d7d7cf Thomas Thrainer
         (old_disk_size, self.disk.size))
1510 763ad5be Thomas Thrainer
    else:
1511 763ad5be Thomas Thrainer
      old_disk_size = None
1512 763ad5be Thomas Thrainer
1513 763ad5be Thomas Thrainer
    # We know that (as far as we can test) operations across different
1514 763ad5be Thomas Thrainer
    # nodes will succeed, time to run it for real on the backing storage
1515 d0d7d7cf Thomas Thrainer
    for node_uuid in self.instance.all_nodes:
1516 d0d7d7cf Thomas Thrainer
      self.cfg.SetDiskID(self.disk, node_uuid)
1517 d0d7d7cf Thomas Thrainer
      result = self.rpc.call_blockdev_grow(node_uuid,
1518 d0d7d7cf Thomas Thrainer
                                           (self.disk, self.instance),
1519 e43a624e Bernardo Dal Seno
                                           self.delta, False, True,
1520 e43a624e Bernardo Dal Seno
                                           self.node_es_flags[node_uuid])
1521 1c3231aa Thomas Thrainer
      result.Raise("Grow request failed to node %s" %
1522 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(node_uuid))
1523 763ad5be Thomas Thrainer
1524 763ad5be Thomas Thrainer
    # And now execute it for logical storage, on the primary node
1525 d0d7d7cf Thomas Thrainer
    node_uuid = self.instance.primary_node
1526 d0d7d7cf Thomas Thrainer
    self.cfg.SetDiskID(self.disk, node_uuid)
1527 d0d7d7cf Thomas Thrainer
    result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
1528 e43a624e Bernardo Dal Seno
                                         self.delta, False, False,
1529 e43a624e Bernardo Dal Seno
                                         self.node_es_flags[node_uuid])
1530 1c3231aa Thomas Thrainer
    result.Raise("Grow request failed to node %s" %
1531 1c3231aa Thomas Thrainer
                 self.cfg.GetNodeName(node_uuid))
1532 763ad5be Thomas Thrainer
1533 d0d7d7cf Thomas Thrainer
    self.disk.RecordGrow(self.delta)
1534 d0d7d7cf Thomas Thrainer
    self.cfg.Update(self.instance, feedback_fn)
1535 763ad5be Thomas Thrainer
1536 763ad5be Thomas Thrainer
    # Changes have been recorded, release node lock
1537 5eacbcae Thomas Thrainer
    ReleaseLocks(self, locking.LEVEL_NODE)
1538 763ad5be Thomas Thrainer
1539 763ad5be Thomas Thrainer
    # Downgrade lock while waiting for sync
1540 763ad5be Thomas Thrainer
    self.glm.downgrade(locking.LEVEL_INSTANCE)
1541 763ad5be Thomas Thrainer
1542 763ad5be Thomas Thrainer
    assert wipe_disks ^ (old_disk_size is None)
1543 763ad5be Thomas Thrainer
1544 763ad5be Thomas Thrainer
    if wipe_disks:
1545 d0d7d7cf Thomas Thrainer
      assert self.instance.disks[self.op.disk] == self.disk
1546 763ad5be Thomas Thrainer
1547 763ad5be Thomas Thrainer
      # Wipe newly added disk space
1548 d0d7d7cf Thomas Thrainer
      WipeDisks(self, self.instance,
1549 d0d7d7cf Thomas Thrainer
                disks=[(self.op.disk, self.disk, old_disk_size)])
1550 763ad5be Thomas Thrainer
1551 763ad5be Thomas Thrainer
    if self.op.wait_for_sync:
1552 d0d7d7cf Thomas Thrainer
      disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
1553 763ad5be Thomas Thrainer
      if disk_abort:
1554 763ad5be Thomas Thrainer
        self.LogWarning("Disk syncing has not returned a good status; check"
1555 763ad5be Thomas Thrainer
                        " the instance")
1556 d0d7d7cf Thomas Thrainer
      if not self.instance.disks_active:
1557 d0d7d7cf Thomas Thrainer
        _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
1558 d0d7d7cf Thomas Thrainer
    elif not self.instance.disks_active:
1559 763ad5be Thomas Thrainer
      self.LogWarning("Not shutting down the disk even if the instance is"
1560 763ad5be Thomas Thrainer
                      " not supposed to be running because no wait for"
1561 763ad5be Thomas Thrainer
                      " sync mode was requested")
1562 763ad5be Thomas Thrainer
1563 763ad5be Thomas Thrainer
    assert self.owned_locks(locking.LEVEL_NODE_RES)
1564 d0d7d7cf Thomas Thrainer
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
1565 763ad5be Thomas Thrainer
1566 763ad5be Thomas Thrainer
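# A minimal sketch of the order of operations in LUInstanceGrowDisk.Exec
# above: growth is first exercised as a dry run on every node, then
# committed on the backing storage of every node, and only then on the
# logical (top-level) device on the primary node; the pre-grow size is
# remembered so that, with prealloc_wipe_disks, only the newly added space
# is wiped.  grow_fn and wipe_fn are illustrative stand-ins for the real
# RPC helpers used above.
def _example_grow_sequence(node_uuids, primary_uuid, old_size_mib, delta_mib,
                           grow_fn, wipe_fn):
  for node_uuid in node_uuids:          # 1. dry run everywhere
    grow_fn(node_uuid, delta_mib, dryrun=True, backingstore=True)
  for node_uuid in node_uuids:          # 2. grow the backing storage
    grow_fn(node_uuid, delta_mib, dryrun=False, backingstore=True)
  grow_fn(primary_uuid, delta_mib,      # 3. grow the logical device
          dryrun=False, backingstore=False)
  wipe_fn(offset=old_size_mib, size=delta_mib)  # 4. wipe only the new space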
1567 763ad5be Thomas Thrainer
class LUInstanceReplaceDisks(LogicalUnit):
1568 763ad5be Thomas Thrainer
  """Replace the disks of an instance.
1569 763ad5be Thomas Thrainer

1570 763ad5be Thomas Thrainer
  """
1571 763ad5be Thomas Thrainer
  HPATH = "mirrors-replace"
1572 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
1573 763ad5be Thomas Thrainer
  REQ_BGL = False
1574 763ad5be Thomas Thrainer
1575 763ad5be Thomas Thrainer
  def CheckArguments(self):
1576 763ad5be Thomas Thrainer
    """Check arguments.
1577 763ad5be Thomas Thrainer

1578 763ad5be Thomas Thrainer
    """
1579 763ad5be Thomas Thrainer
    if self.op.mode == constants.REPLACE_DISK_CHG:
1580 d0d7d7cf Thomas Thrainer
      if self.op.remote_node is None and self.op.iallocator is None:
1581 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("When changing the secondary either an"
1582 763ad5be Thomas Thrainer
                                   " iallocator script must be used or the"
1583 763ad5be Thomas Thrainer
                                   " new node given", errors.ECODE_INVAL)
1584 763ad5be Thomas Thrainer
      else:
1585 5eacbcae Thomas Thrainer
        CheckIAllocatorOrNode(self, "iallocator", "remote_node")
1586 763ad5be Thomas Thrainer
1587 d0d7d7cf Thomas Thrainer
    elif self.op.remote_node is not None or self.op.iallocator is not None:
1588 763ad5be Thomas Thrainer
      # Not replacing the secondary
1589 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The iallocator and new node options can"
1590 763ad5be Thomas Thrainer
                                 " only be used when changing the"
1591 763ad5be Thomas Thrainer
                                 " secondary node", errors.ECODE_INVAL)
1592 763ad5be Thomas Thrainer
1593 763ad5be Thomas Thrainer
  def ExpandNames(self):
1594 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1595 763ad5be Thomas Thrainer
1596 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODE not in self.needed_locks
1597 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODE_RES not in self.needed_locks
1598 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODEGROUP not in self.needed_locks
1599 763ad5be Thomas Thrainer
1600 763ad5be Thomas Thrainer
    assert self.op.iallocator is None or self.op.remote_node is None, \
1601 763ad5be Thomas Thrainer
      "Conflicting options"
1602 763ad5be Thomas Thrainer
1603 763ad5be Thomas Thrainer
    if self.op.remote_node is not None:
1604 1c3231aa Thomas Thrainer
      (self.op.remote_node_uuid, self.op.remote_node) = \
1605 1c3231aa Thomas Thrainer
        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
1606 1c3231aa Thomas Thrainer
                              self.op.remote_node)
1607 763ad5be Thomas Thrainer
1608 763ad5be Thomas Thrainer
      # Warning: do not remove the locking of the new secondary here
1609 1bb99a33 Bernardo Dal Seno
      # unless DRBD8Dev.AddChildren is changed to work in parallel;
1610 763ad5be Thomas Thrainer
      # currently it doesn't since parallel invocations of
1611 763ad5be Thomas Thrainer
      # FindUnusedMinor will conflict
1612 1c3231aa Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
1613 763ad5be Thomas Thrainer
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1614 763ad5be Thomas Thrainer
    else:
1615 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = []
1616 763ad5be Thomas Thrainer
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1617 763ad5be Thomas Thrainer
1618 763ad5be Thomas Thrainer
      if self.op.iallocator is not None:
1619 763ad5be Thomas Thrainer
        # iallocator will select a new node in the same group
1620 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
1621 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
1622 763ad5be Thomas Thrainer
1623 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1624 763ad5be Thomas Thrainer
1625 da4a52a3 Thomas Thrainer
    self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
1626 da4a52a3 Thomas Thrainer
                                   self.op.instance_name, self.op.mode,
1627 1c3231aa Thomas Thrainer
                                   self.op.iallocator, self.op.remote_node_uuid,
1628 763ad5be Thomas Thrainer
                                   self.op.disks, self.op.early_release,
1629 763ad5be Thomas Thrainer
                                   self.op.ignore_ipolicy)
1630 763ad5be Thomas Thrainer
1631 763ad5be Thomas Thrainer
    self.tasklets = [self.replacer]
1632 763ad5be Thomas Thrainer
1633 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1634 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODEGROUP:
1635 1c3231aa Thomas Thrainer
      assert self.op.remote_node_uuid is None
1636 763ad5be Thomas Thrainer
      assert self.op.iallocator is not None
1637 763ad5be Thomas Thrainer
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
1638 763ad5be Thomas Thrainer
1639 763ad5be Thomas Thrainer
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
1640 763ad5be Thomas Thrainer
      # Lock all groups used by instance optimistically; this requires going
1641 763ad5be Thomas Thrainer
      # via the node before it's locked, requiring verification later on
1642 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
1643 da4a52a3 Thomas Thrainer
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
1644 763ad5be Thomas Thrainer
1645 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE:
1646 763ad5be Thomas Thrainer
      if self.op.iallocator is not None:
1647 1c3231aa Thomas Thrainer
        assert self.op.remote_node_uuid is None
1648 763ad5be Thomas Thrainer
        assert not self.needed_locks[locking.LEVEL_NODE]
1649 763ad5be Thomas Thrainer
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
1650 763ad5be Thomas Thrainer
1651 763ad5be Thomas Thrainer
        # Lock member nodes of all locked groups
1652 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE] = \
1653 1c3231aa Thomas Thrainer
          [node_uuid
1654 763ad5be Thomas Thrainer
           for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
1655 1c3231aa Thomas Thrainer
           for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
1656 763ad5be Thomas Thrainer
      else:
1657 763ad5be Thomas Thrainer
        assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1658 763ad5be Thomas Thrainer
1659 763ad5be Thomas Thrainer
        self._LockInstancesNodes()
1660 763ad5be Thomas Thrainer
1661 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
1662 763ad5be Thomas Thrainer
      # Reuse node locks
1663 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1664 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE]
1665 763ad5be Thomas Thrainer
1666 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
1667 763ad5be Thomas Thrainer
    """Build hooks env.
1668 763ad5be Thomas Thrainer

1669 763ad5be Thomas Thrainer
    This runs on the master, the primary and all the secondaries.
1670 763ad5be Thomas Thrainer

1671 763ad5be Thomas Thrainer
    """
1672 763ad5be Thomas Thrainer
    instance = self.replacer.instance
1673 763ad5be Thomas Thrainer
    env = {
1674 763ad5be Thomas Thrainer
      "MODE": self.op.mode,
1675 763ad5be Thomas Thrainer
      "NEW_SECONDARY": self.op.remote_node,
1676 1c3231aa Thomas Thrainer
      "OLD_SECONDARY": self.cfg.GetNodeName(instance.secondary_nodes[0]),
1677 763ad5be Thomas Thrainer
      }
1678 5eacbcae Thomas Thrainer
    env.update(BuildInstanceHookEnvByObject(self, instance))
1679 763ad5be Thomas Thrainer
    return env
1680 763ad5be Thomas Thrainer
1681 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
1682 763ad5be Thomas Thrainer
    """Build hooks nodes.
1683 763ad5be Thomas Thrainer

1684 763ad5be Thomas Thrainer
    """
1685 763ad5be Thomas Thrainer
    instance = self.replacer.instance
1686 763ad5be Thomas Thrainer
    nl = [
1687 763ad5be Thomas Thrainer
      self.cfg.GetMasterNode(),
1688 763ad5be Thomas Thrainer
      instance.primary_node,
1689 763ad5be Thomas Thrainer
      ]
1690 1c3231aa Thomas Thrainer
    if self.op.remote_node_uuid is not None:
1691 1c3231aa Thomas Thrainer
      nl.append(self.op.remote_node_uuid)
1692 763ad5be Thomas Thrainer
    return nl, nl
1693 763ad5be Thomas Thrainer
1694 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1695 763ad5be Thomas Thrainer
    """Check prerequisites.
1696 763ad5be Thomas Thrainer

1697 763ad5be Thomas Thrainer
    """
1698 763ad5be Thomas Thrainer
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
1699 763ad5be Thomas Thrainer
            self.op.iallocator is None)
1700 763ad5be Thomas Thrainer
1701 763ad5be Thomas Thrainer
    # Verify if node group locks are still correct
1702 763ad5be Thomas Thrainer
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
1703 763ad5be Thomas Thrainer
    if owned_groups:
1704 da4a52a3 Thomas Thrainer
      CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
1705 763ad5be Thomas Thrainer
1706 763ad5be Thomas Thrainer
    return LogicalUnit.CheckPrereq(self)
1707 763ad5be Thomas Thrainer
1708 763ad5be Thomas Thrainer
1709 763ad5be Thomas Thrainer
class LUInstanceActivateDisks(NoHooksLU):
1710 763ad5be Thomas Thrainer
  """Bring up an instance's disks.
1711 763ad5be Thomas Thrainer

1712 763ad5be Thomas Thrainer
  """
1713 763ad5be Thomas Thrainer
  REQ_BGL = False
1714 763ad5be Thomas Thrainer
1715 763ad5be Thomas Thrainer
  def ExpandNames(self):
1716 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1717 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1718 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1719 763ad5be Thomas Thrainer
1720 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1721 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1722 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1723 763ad5be Thomas Thrainer
1724 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1725 763ad5be Thomas Thrainer
    """Check prerequisites.
1726 763ad5be Thomas Thrainer

1727 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1728 763ad5be Thomas Thrainer

1729 763ad5be Thomas Thrainer
    """
1730 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1731 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1732 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1733 5eacbcae Thomas Thrainer
    CheckNodeOnline(self, self.instance.primary_node)
1734 763ad5be Thomas Thrainer
1735 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1736 763ad5be Thomas Thrainer
    """Activate the disks.
1737 763ad5be Thomas Thrainer

1738 763ad5be Thomas Thrainer
    """
1739 763ad5be Thomas Thrainer
    disks_ok, disks_info = \
1740 5eacbcae Thomas Thrainer
              AssembleInstanceDisks(self, self.instance,
1741 5eacbcae Thomas Thrainer
                                    ignore_size=self.op.ignore_size)
1742 763ad5be Thomas Thrainer
    if not disks_ok:
1743 763ad5be Thomas Thrainer
      raise errors.OpExecError("Cannot activate block devices")
1744 763ad5be Thomas Thrainer
1745 763ad5be Thomas Thrainer
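    # Optionally wait for the disks to finish syncing; if they remain
    # degraded, mark them inactive again and abort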
    if self.op.wait_for_sync:
1746 5eacbcae Thomas Thrainer
      if not WaitForSync(self, self.instance):
1747 da4a52a3 Thomas Thrainer
        self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
1748 763ad5be Thomas Thrainer
        raise errors.OpExecError("Some disks of the instance are degraded!")
1749 763ad5be Thomas Thrainer
1750 763ad5be Thomas Thrainer
    return disks_info
1751 763ad5be Thomas Thrainer
1752 763ad5be Thomas Thrainer
1753 763ad5be Thomas Thrainer
class LUInstanceDeactivateDisks(NoHooksLU):
1754 763ad5be Thomas Thrainer
  """Shutdown an instance's disks.
1755 763ad5be Thomas Thrainer

1756 763ad5be Thomas Thrainer
  """
1757 763ad5be Thomas Thrainer
  REQ_BGL = False
1758 763ad5be Thomas Thrainer
1759 763ad5be Thomas Thrainer
  def ExpandNames(self):
1760 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1761 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1762 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1763 763ad5be Thomas Thrainer
1764 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1765 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1766 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1767 763ad5be Thomas Thrainer
1768 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1769 763ad5be Thomas Thrainer
    """Check prerequisites.
1770 763ad5be Thomas Thrainer

1771 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1772 763ad5be Thomas Thrainer

1773 763ad5be Thomas Thrainer
    """
1774 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1775 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1776 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1777 763ad5be Thomas Thrainer
1778 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1779 763ad5be Thomas Thrainer
    """Deactivate the disks
1780 763ad5be Thomas Thrainer

1781 763ad5be Thomas Thrainer
    """
1782 763ad5be Thomas Thrainer
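    # Unless forced, use the safe variant, which first checks that the
    # instance is not running before shutting down its disks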
    if self.op.force:
1783 d0d7d7cf Thomas Thrainer
      ShutdownInstanceDisks(self, self.instance)
1784 763ad5be Thomas Thrainer
    else:
1785 d0d7d7cf Thomas Thrainer
      _SafeShutdownInstanceDisks(self, self.instance)
1786 763ad5be Thomas Thrainer
1787 763ad5be Thomas Thrainer
1788 1c3231aa Thomas Thrainer
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
1789 763ad5be Thomas Thrainer
                               ldisk=False):
1790 763ad5be Thomas Thrainer
  """Check that mirrors are not degraded.
1791 763ad5be Thomas Thrainer

1792 763ad5be Thomas Thrainer
  @attention: The device has to be annotated already.
1793 763ad5be Thomas Thrainer

1794 763ad5be Thomas Thrainer
  The ldisk parameter, if True, will change the test from the
1795 763ad5be Thomas Thrainer
  is_degraded attribute (which represents overall non-ok status for
1796 763ad5be Thomas Thrainer
  the device(s)) to the ldisk (representing the local storage status).
1797 763ad5be Thomas Thrainer

1798 763ad5be Thomas Thrainer
  """
1799 1c3231aa Thomas Thrainer
  lu.cfg.SetDiskID(dev, node_uuid)
1800 763ad5be Thomas Thrainer
1801 763ad5be Thomas Thrainer
  result = True
1802 763ad5be Thomas Thrainer
1803 763ad5be Thomas Thrainer
  if on_primary or dev.AssembleOnSecondary():
1804 1c3231aa Thomas Thrainer
    rstats = lu.rpc.call_blockdev_find(node_uuid, dev)
1805 763ad5be Thomas Thrainer
    msg = rstats.fail_msg
1806 763ad5be Thomas Thrainer
    if msg:
1807 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s: %s",
1808 1c3231aa Thomas Thrainer
                    lu.cfg.GetNodeName(node_uuid), msg)
1809 763ad5be Thomas Thrainer
      result = False
1810 763ad5be Thomas Thrainer
    elif not rstats.payload:
1811 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
1812 763ad5be Thomas Thrainer
      result = False
1813 763ad5be Thomas Thrainer
    else:
1814 763ad5be Thomas Thrainer
      if ldisk:
1815 763ad5be Thomas Thrainer
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
1816 763ad5be Thomas Thrainer
      else:
1817 763ad5be Thomas Thrainer
        result = result and not rstats.payload.is_degraded
1818 763ad5be Thomas Thrainer
1819 763ad5be Thomas Thrainer
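  # Recursively check any child devices (e.g. the LVs backing a DRBD disk)
  # on the same node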
  if dev.children:
1820 763ad5be Thomas Thrainer
    for child in dev.children:
1821 1c3231aa Thomas Thrainer
      result = result and _CheckDiskConsistencyInner(lu, instance, child,
1822 1c3231aa Thomas Thrainer
                                                     node_uuid, on_primary)
1823 763ad5be Thomas Thrainer
1824 763ad5be Thomas Thrainer
  return result
1825 763ad5be Thomas Thrainer
1826 763ad5be Thomas Thrainer
1827 1c3231aa Thomas Thrainer
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
1828 763ad5be Thomas Thrainer
  """Wrapper around L{_CheckDiskConsistencyInner}.
1829 763ad5be Thomas Thrainer

1830 763ad5be Thomas Thrainer
  """
1831 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1832 1c3231aa Thomas Thrainer
  return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
1833 763ad5be Thomas Thrainer
                                    ldisk=ldisk)
1834 763ad5be Thomas Thrainer
1835 763ad5be Thomas Thrainer
1836 1c3231aa Thomas Thrainer
def _BlockdevFind(lu, node_uuid, dev, instance):
1837 763ad5be Thomas Thrainer
  """Wrapper around call_blockdev_find to annotate diskparams.
1838 763ad5be Thomas Thrainer

1839 763ad5be Thomas Thrainer
  @param lu: A reference to the lu object
1840 1c3231aa Thomas Thrainer
  @param node_uuid: The node to call out
1841 763ad5be Thomas Thrainer
  @param dev: The device to find
1842 763ad5be Thomas Thrainer
  @param instance: The instance object the device belongs to
1843 763ad5be Thomas Thrainer
  @returns The result of the rpc call
1844 763ad5be Thomas Thrainer

1845 763ad5be Thomas Thrainer
  """
1846 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1847 1c3231aa Thomas Thrainer
  return lu.rpc.call_blockdev_find(node_uuid, disk)
1848 763ad5be Thomas Thrainer
1849 763ad5be Thomas Thrainer
1850 763ad5be Thomas Thrainer
def _GenerateUniqueNames(lu, exts):
1851 763ad5be Thomas Thrainer
  """Generate a suitable LV name.
1852 763ad5be Thomas Thrainer

1853 763ad5be Thomas Thrainer
  This will generate a logical volume name for the given instance.
1854 763ad5be Thomas Thrainer

1855 763ad5be Thomas Thrainer
  """
1856 763ad5be Thomas Thrainer
  results = []
1857 763ad5be Thomas Thrainer
  for val in exts:
1858 763ad5be Thomas Thrainer
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
1859 763ad5be Thomas Thrainer
    results.append("%s%s" % (new_id, val))
1860 763ad5be Thomas Thrainer
  return results
1861 763ad5be Thomas Thrainer
1862 763ad5be Thomas Thrainer
1863 763ad5be Thomas Thrainer
class TLReplaceDisks(Tasklet):
1864 763ad5be Thomas Thrainer
  """Replaces disks for an instance.
1865 763ad5be Thomas Thrainer

1866 763ad5be Thomas Thrainer
  Note: Locking is not within the scope of this class.
1867 763ad5be Thomas Thrainer

1868 763ad5be Thomas Thrainer
  """
1869 da4a52a3 Thomas Thrainer
  def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
1870 da4a52a3 Thomas Thrainer
               remote_node_uuid, disks, early_release, ignore_ipolicy):
1871 763ad5be Thomas Thrainer
    """Initializes this class.
1872 763ad5be Thomas Thrainer

1873 763ad5be Thomas Thrainer
    """
1874 763ad5be Thomas Thrainer
    Tasklet.__init__(self, lu)
1875 763ad5be Thomas Thrainer
1876 763ad5be Thomas Thrainer
    # Parameters
1877 da4a52a3 Thomas Thrainer
    self.instance_uuid = instance_uuid
1878 763ad5be Thomas Thrainer
    self.instance_name = instance_name
1879 763ad5be Thomas Thrainer
    self.mode = mode
1880 763ad5be Thomas Thrainer
    self.iallocator_name = iallocator_name
1881 1c3231aa Thomas Thrainer
    self.remote_node_uuid = remote_node_uuid
1882 763ad5be Thomas Thrainer
    self.disks = disks
1883 763ad5be Thomas Thrainer
    self.early_release = early_release
1884 763ad5be Thomas Thrainer
    self.ignore_ipolicy = ignore_ipolicy
1885 763ad5be Thomas Thrainer
1886 763ad5be Thomas Thrainer
    # Runtime data
1887 763ad5be Thomas Thrainer
    self.instance = None
1888 1c3231aa Thomas Thrainer
    self.new_node_uuid = None
1889 1c3231aa Thomas Thrainer
    self.target_node_uuid = None
1890 1c3231aa Thomas Thrainer
    self.other_node_uuid = None
1891 763ad5be Thomas Thrainer
    self.remote_node_info = None
1892 763ad5be Thomas Thrainer
    self.node_secondary_ip = None
1893 763ad5be Thomas Thrainer
1894 763ad5be Thomas Thrainer
  @staticmethod
1895 da4a52a3 Thomas Thrainer
  def _RunAllocator(lu, iallocator_name, instance_uuid,
1896 1c3231aa Thomas Thrainer
                    relocate_from_node_uuids):
1897 763ad5be Thomas Thrainer
    """Compute a new secondary node using an IAllocator.
1898 763ad5be Thomas Thrainer

1899 763ad5be Thomas Thrainer
    """
1900 1c3231aa Thomas Thrainer
    req = iallocator.IAReqRelocate(
1901 da4a52a3 Thomas Thrainer
          inst_uuid=instance_uuid,
1902 1c3231aa Thomas Thrainer
          relocate_from_node_uuids=list(relocate_from_node_uuids))
1903 763ad5be Thomas Thrainer
    ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
1904 763ad5be Thomas Thrainer
1905 763ad5be Thomas Thrainer
    ial.Run(iallocator_name)
1906 763ad5be Thomas Thrainer
1907 763ad5be Thomas Thrainer
    if not ial.success:
1908 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
1909 763ad5be Thomas Thrainer
                                 " %s" % (iallocator_name, ial.info),
1910 763ad5be Thomas Thrainer
                                 errors.ECODE_NORES)
1911 763ad5be Thomas Thrainer
1912 763ad5be Thomas Thrainer
    remote_node_name = ial.result[0]
1913 1c3231aa Thomas Thrainer
    remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
1914 1c3231aa Thomas Thrainer
1915 1c3231aa Thomas Thrainer
    if remote_node is None:
1916 1c3231aa Thomas Thrainer
      raise errors.OpPrereqError("Node %s not found in configuration" %
1917 1c3231aa Thomas Thrainer
                                 remote_node_name, errors.ECODE_NOENT)
1918 763ad5be Thomas Thrainer
1919 763ad5be Thomas Thrainer
    lu.LogInfo("Selected new secondary for instance '%s': %s",
1920 da4a52a3 Thomas Thrainer
               instance_uuid, remote_node_name)
1921 763ad5be Thomas Thrainer
1922 1c3231aa Thomas Thrainer
    return remote_node.uuid
1923 763ad5be Thomas Thrainer
1924 1c3231aa Thomas Thrainer
  def _FindFaultyDisks(self, node_uuid):
1925 5eacbcae Thomas Thrainer
    """Wrapper for L{FindFaultyInstanceDisks}.
1926 763ad5be Thomas Thrainer

1927 763ad5be Thomas Thrainer
    """
1928 5eacbcae Thomas Thrainer
    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
1929 1c3231aa Thomas Thrainer
                                   node_uuid, True)
1930 763ad5be Thomas Thrainer
1931 763ad5be Thomas Thrainer
  def _CheckDisksActivated(self, instance):
1932 763ad5be Thomas Thrainer
    """Checks if the instance disks are activated.
1933 763ad5be Thomas Thrainer

1934 763ad5be Thomas Thrainer
    @param instance: The instance to check disks
1935 763ad5be Thomas Thrainer
    @return: True if they are activated, False otherwise
1936 763ad5be Thomas Thrainer

1937 763ad5be Thomas Thrainer
    """
1938 1c3231aa Thomas Thrainer
    node_uuids = instance.all_nodes
1939 763ad5be Thomas Thrainer
1940 763ad5be Thomas Thrainer
    for idx, dev in enumerate(instance.disks):
1941 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
1942 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
1943 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
1944 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(dev, node_uuid)
1945 763ad5be Thomas Thrainer
1946 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, instance)
1947 763ad5be Thomas Thrainer
1948 763ad5be Thomas Thrainer
        if result.offline:
1949 763ad5be Thomas Thrainer
          continue
1950 763ad5be Thomas Thrainer
        elif result.fail_msg or not result.payload:
1951 763ad5be Thomas Thrainer
          return False
1952 763ad5be Thomas Thrainer
1953 763ad5be Thomas Thrainer
    return True
1954 763ad5be Thomas Thrainer
1955 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1956 763ad5be Thomas Thrainer
    """Check prerequisites.
1957 763ad5be Thomas Thrainer

1958 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1959 763ad5be Thomas Thrainer

1960 763ad5be Thomas Thrainer
    """
1961 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
1962 d0d7d7cf Thomas Thrainer
    assert self.instance is not None, \
1963 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.instance_name
1964 763ad5be Thomas Thrainer
1965 d0d7d7cf Thomas Thrainer
    if self.instance.disk_template != constants.DT_DRBD8:
1966 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
1967 763ad5be Thomas Thrainer
                                 " instances", errors.ECODE_INVAL)
1968 763ad5be Thomas Thrainer
1969 d0d7d7cf Thomas Thrainer
    if len(self.instance.secondary_nodes) != 1:
1970 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The instance has a strange layout,"
1971 763ad5be Thomas Thrainer
                                 " expected one secondary but found %d" %
1972 d0d7d7cf Thomas Thrainer
                                 len(self.instance.secondary_nodes),
1973 763ad5be Thomas Thrainer
                                 errors.ECODE_FAULT)
1974 763ad5be Thomas Thrainer
1975 d0d7d7cf Thomas Thrainer
    secondary_node_uuid = self.instance.secondary_nodes[0]
1976 763ad5be Thomas Thrainer
1977 763ad5be Thomas Thrainer
    if self.iallocator_name is None:
1978 1c3231aa Thomas Thrainer
      remote_node_uuid = self.remote_node_uuid
1979 763ad5be Thomas Thrainer
    else:
1980 1c3231aa Thomas Thrainer
      remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
1981 da4a52a3 Thomas Thrainer
                                            self.instance.uuid,
1982 d0d7d7cf Thomas Thrainer
                                            self.instance.secondary_nodes)
1983 763ad5be Thomas Thrainer
1984 1c3231aa Thomas Thrainer
    if remote_node_uuid is None:
1985 763ad5be Thomas Thrainer
      self.remote_node_info = None
1986 763ad5be Thomas Thrainer
    else:
1987 1c3231aa Thomas Thrainer
      assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
1988 1c3231aa Thomas Thrainer
             "Remote node '%s' is not locked" % remote_node_uuid
1989 763ad5be Thomas Thrainer
1990 1c3231aa Thomas Thrainer
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
1991 763ad5be Thomas Thrainer
      assert self.remote_node_info is not None, \
1992 1c3231aa Thomas Thrainer
        "Cannot retrieve locked node %s" % remote_node_uuid
1993 763ad5be Thomas Thrainer
1994 1c3231aa Thomas Thrainer
    if remote_node_uuid == self.instance.primary_node:
1995 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is the primary node of"
1996 763ad5be Thomas Thrainer
                                 " the instance", errors.ECODE_INVAL)
1997 763ad5be Thomas Thrainer
1998 1c3231aa Thomas Thrainer
    if remote_node_uuid == secondary_node_uuid:
1999 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is already the"
2000 763ad5be Thomas Thrainer
                                 " secondary node of the instance",
2001 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
2002 763ad5be Thomas Thrainer
2003 763ad5be Thomas Thrainer
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
2004 763ad5be Thomas Thrainer
                                    constants.REPLACE_DISK_CHG):
2005 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
2006 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
2007 763ad5be Thomas Thrainer
2008 763ad5be Thomas Thrainer
    if self.mode == constants.REPLACE_DISK_AUTO:
2009 d0d7d7cf Thomas Thrainer
      if not self._CheckDisksActivated(self.instance):
2010 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
2011 763ad5be Thomas Thrainer
                                   " first" % self.instance_name,
2012 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
2013 d0d7d7cf Thomas Thrainer
      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
2014 1c3231aa Thomas Thrainer
      faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
2015 763ad5be Thomas Thrainer
2016 763ad5be Thomas Thrainer
      if faulty_primary and faulty_secondary:
2017 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
2018 763ad5be Thomas Thrainer
                                   " one node and can not be repaired"
2019 763ad5be Thomas Thrainer
                                   " automatically" % self.instance_name,
2020 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
2021 763ad5be Thomas Thrainer
2022 763ad5be Thomas Thrainer
      if faulty_primary:
2023 763ad5be Thomas Thrainer
        self.disks = faulty_primary
2024 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2025 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2026 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2027 763ad5be Thomas Thrainer
      elif faulty_secondary:
2028 763ad5be Thomas Thrainer
        self.disks = faulty_secondary
2029 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2030 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2031 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2032 763ad5be Thomas Thrainer
      else:
2033 763ad5be Thomas Thrainer
        self.disks = []
2034 763ad5be Thomas Thrainer
        check_nodes = []
2035 763ad5be Thomas Thrainer
2036 763ad5be Thomas Thrainer
    else:
2037 763ad5be Thomas Thrainer
      # Non-automatic modes
2038 763ad5be Thomas Thrainer
      if self.mode == constants.REPLACE_DISK_PRI:
2039 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2040 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2041 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2042 763ad5be Thomas Thrainer
2043 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_SEC:
2044 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2045 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2046 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2047 763ad5be Thomas Thrainer
2048 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_CHG:
2049 1c3231aa Thomas Thrainer
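        # Replacing the secondary: the freshly selected node becomes the new
        # secondary, while the old secondary (the target node) will have its
        # disks removed at the end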
        self.new_node_uuid = remote_node_uuid
2050 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2051 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2052 1c3231aa Thomas Thrainer
        check_nodes = [self.new_node_uuid, self.other_node_uuid]
2053 763ad5be Thomas Thrainer
2054 1c3231aa Thomas Thrainer
        CheckNodeNotDrained(self.lu, remote_node_uuid)
2055 1c3231aa Thomas Thrainer
        CheckNodeVmCapable(self.lu, remote_node_uuid)
2056 763ad5be Thomas Thrainer
2057 1c3231aa Thomas Thrainer
        old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
2058 763ad5be Thomas Thrainer
        assert old_node_info is not None
2059 763ad5be Thomas Thrainer
        if old_node_info.offline and not self.early_release:
2060 763ad5be Thomas Thrainer
          # doesn't make sense to delay the release
2061 763ad5be Thomas Thrainer
          self.early_release = True
2062 763ad5be Thomas Thrainer
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
2063 1c3231aa Thomas Thrainer
                          " early-release mode", secondary_node_uuid)
2064 763ad5be Thomas Thrainer
2065 763ad5be Thomas Thrainer
      else:
2066 763ad5be Thomas Thrainer
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
2067 763ad5be Thomas Thrainer
                                     self.mode)
2068 763ad5be Thomas Thrainer
2069 763ad5be Thomas Thrainer
      # If not specified all disks should be replaced
2070 763ad5be Thomas Thrainer
      if not self.disks:
2071 763ad5be Thomas Thrainer
        self.disks = range(len(self.instance.disks))
2072 763ad5be Thomas Thrainer
2073 763ad5be Thomas Thrainer
    # TODO: This is ugly, but right now we can't distinguish between an
2074 763ad5be Thomas Thrainer
    # internally submitted opcode and an external one. We should fix that.
2075 763ad5be Thomas Thrainer
    if self.remote_node_info:
2076 763ad5be Thomas Thrainer
      # We change the node, let's verify it still meets the instance policy
2077 763ad5be Thomas Thrainer
      new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
2078 763ad5be Thomas Thrainer
      cluster = self.cfg.GetClusterInfo()
2079 763ad5be Thomas Thrainer
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2080 763ad5be Thomas Thrainer
                                                              new_group_info)
2081 d0d7d7cf Thomas Thrainer
      CheckTargetNodeIPolicy(self, ipolicy, self.instance,
2082 d0d7d7cf Thomas Thrainer
                             self.remote_node_info, self.cfg,
2083 d0d7d7cf Thomas Thrainer
                             ignore=self.ignore_ipolicy)
2084 763ad5be Thomas Thrainer
2085 1c3231aa Thomas Thrainer
    for node_uuid in check_nodes:
2086 1c3231aa Thomas Thrainer
      CheckNodeOnline(self.lu, node_uuid)
2087 763ad5be Thomas Thrainer
2088 1c3231aa Thomas Thrainer
    touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
2089 1c3231aa Thomas Thrainer
                                                          self.other_node_uuid,
2090 1c3231aa Thomas Thrainer
                                                          self.target_node_uuid]
2091 1c3231aa Thomas Thrainer
                              if node_uuid is not None)
2092 763ad5be Thomas Thrainer
2093 763ad5be Thomas Thrainer
    # Release unneeded node and node resource locks
2094 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
2095 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
2096 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
2097 763ad5be Thomas Thrainer
2098 763ad5be Thomas Thrainer
    # Release any owned node group
2099 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
2100 763ad5be Thomas Thrainer
2101 763ad5be Thomas Thrainer
    # Check whether disks are valid
2102 763ad5be Thomas Thrainer
    for disk_idx in self.disks:
2103 d0d7d7cf Thomas Thrainer
      self.instance.FindDisk(disk_idx)
2104 763ad5be Thomas Thrainer
2105 763ad5be Thomas Thrainer
    # Get secondary node IP addresses
2106 1c3231aa Thomas Thrainer
    self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
2107 763ad5be Thomas Thrainer
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
2108 763ad5be Thomas Thrainer
2109 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
2110 763ad5be Thomas Thrainer
    """Execute disk replacement.
2111 763ad5be Thomas Thrainer

2112 763ad5be Thomas Thrainer
    This dispatches the disk replacement to the appropriate handler.
2113 763ad5be Thomas Thrainer

2114 763ad5be Thomas Thrainer
    """
2115 763ad5be Thomas Thrainer
    if __debug__:
2116 763ad5be Thomas Thrainer
      # Verify owned locks before starting operation
2117 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
2118 763ad5be Thomas Thrainer
      assert set(owned_nodes) == set(self.node_secondary_ip), \
2119 763ad5be Thomas Thrainer
          ("Incorrect node locks, owning %s, expected %s" %
2120 763ad5be Thomas Thrainer
           (owned_nodes, self.node_secondary_ip.keys()))
2121 763ad5be Thomas Thrainer
      assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
2122 763ad5be Thomas Thrainer
              self.lu.owned_locks(locking.LEVEL_NODE_RES))
2123 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
2124 763ad5be Thomas Thrainer
2125 763ad5be Thomas Thrainer
      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
2126 763ad5be Thomas Thrainer
      assert list(owned_instances) == [self.instance_name], \
2127 763ad5be Thomas Thrainer
          "Instance '%s' not locked" % self.instance_name
2128 763ad5be Thomas Thrainer
2129 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
2130 763ad5be Thomas Thrainer
          "Should not own any node group lock at this point"
2131 763ad5be Thomas Thrainer
2132 763ad5be Thomas Thrainer
    if not self.disks:
2133 763ad5be Thomas Thrainer
      feedback_fn("No disks need replacement for instance '%s'" %
2134 763ad5be Thomas Thrainer
                  self.instance.name)
2135 763ad5be Thomas Thrainer
      return
2136 763ad5be Thomas Thrainer
2137 763ad5be Thomas Thrainer
    feedback_fn("Replacing disk(s) %s for instance '%s'" %
2138 763ad5be Thomas Thrainer
                (utils.CommaJoin(self.disks), self.instance.name))
2139 1c3231aa Thomas Thrainer
    feedback_fn("Current primary node: %s" %
2140 1c3231aa Thomas Thrainer
                self.cfg.GetNodeName(self.instance.primary_node))
2141 763ad5be Thomas Thrainer
    feedback_fn("Current seconary node: %s" %
2142 1c3231aa Thomas Thrainer
                utils.CommaJoin(self.cfg.GetNodeNames(
2143 1c3231aa Thomas Thrainer
                                  self.instance.secondary_nodes)))
2144 763ad5be Thomas Thrainer
2145 1d4a4b26 Thomas Thrainer
    activate_disks = not self.instance.disks_active
2146 763ad5be Thomas Thrainer
2147 763ad5be Thomas Thrainer
    # Activate the instance disks if we're replacing them on a down instance
2148 763ad5be Thomas Thrainer
    if activate_disks:
2149 5eacbcae Thomas Thrainer
      StartInstanceDisks(self.lu, self.instance, True)
2150 763ad5be Thomas Thrainer
2151 763ad5be Thomas Thrainer
    try:
2152 763ad5be Thomas Thrainer
      # Should we replace the secondary node?
2153 1c3231aa Thomas Thrainer
      if self.new_node_uuid is not None:
2154 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8Secondary
2155 763ad5be Thomas Thrainer
      else:
2156 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8DiskOnly
2157 763ad5be Thomas Thrainer
2158 763ad5be Thomas Thrainer
      result = fn(feedback_fn)
2159 763ad5be Thomas Thrainer
    finally:
2160 763ad5be Thomas Thrainer
      # Deactivate the instance disks if we're replacing them on a
2161 763ad5be Thomas Thrainer
      # down instance
2162 763ad5be Thomas Thrainer
      if activate_disks:
2163 763ad5be Thomas Thrainer
        _SafeShutdownInstanceDisks(self.lu, self.instance)
2164 763ad5be Thomas Thrainer
2165 763ad5be Thomas Thrainer
    assert not self.lu.owned_locks(locking.LEVEL_NODE)
2166 763ad5be Thomas Thrainer
2167 763ad5be Thomas Thrainer
    if __debug__:
2168 763ad5be Thomas Thrainer
      # Verify owned locks
2169 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
2170 763ad5be Thomas Thrainer
      nodes = frozenset(self.node_secondary_ip)
2171 763ad5be Thomas Thrainer
      assert ((self.early_release and not owned_nodes) or
2172 763ad5be Thomas Thrainer
              (not self.early_release and not (set(owned_nodes) - nodes))), \
2173 763ad5be Thomas Thrainer
        ("Not owning the correct locks, early_release=%s, owned=%r,"
2174 763ad5be Thomas Thrainer
         " nodes=%r" % (self.early_release, owned_nodes, nodes))
2175 763ad5be Thomas Thrainer
2176 763ad5be Thomas Thrainer
    return result
2177 763ad5be Thomas Thrainer
2178 1c3231aa Thomas Thrainer
  def _CheckVolumeGroup(self, node_uuids):
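    """Check that the cluster volume group exists on the given nodes.

    """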
2179 763ad5be Thomas Thrainer
    self.lu.LogInfo("Checking volume groups")
2180 763ad5be Thomas Thrainer
2181 763ad5be Thomas Thrainer
    vgname = self.cfg.GetVGName()
2182 763ad5be Thomas Thrainer
2183 763ad5be Thomas Thrainer
    # Make sure volume group exists on all involved nodes
2184 1c3231aa Thomas Thrainer
    results = self.rpc.call_vg_list(node_uuids)
2185 763ad5be Thomas Thrainer
    if not results:
2186 763ad5be Thomas Thrainer
      raise errors.OpExecError("Can't list volume groups on the nodes")
2187 763ad5be Thomas Thrainer
2188 1c3231aa Thomas Thrainer
    for node_uuid in node_uuids:
2189 1c3231aa Thomas Thrainer
      res = results[node_uuid]
2190 1c3231aa Thomas Thrainer
      res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
2191 763ad5be Thomas Thrainer
      if vgname not in res.payload:
2192 763ad5be Thomas Thrainer
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
2193 1c3231aa Thomas Thrainer
                                 (vgname, self.cfg.GetNodeName(node_uuid)))
2194 763ad5be Thomas Thrainer
2195 1c3231aa Thomas Thrainer
  def _CheckDisksExistence(self, node_uuids):
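    """Check that the disks to be replaced exist on the given nodes.

    """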
2196 763ad5be Thomas Thrainer
    # Check disk existence
2197 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2198 763ad5be Thomas Thrainer
      if idx not in self.disks:
2199 763ad5be Thomas Thrainer
        continue
2200 763ad5be Thomas Thrainer
2201 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
2202 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
2203 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
2204 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(dev, node_uuid)
2205 763ad5be Thomas Thrainer
2206 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, self.instance)
2207 763ad5be Thomas Thrainer
2208 763ad5be Thomas Thrainer
        msg = result.fail_msg
2209 763ad5be Thomas Thrainer
        if msg or not result.payload:
2210 763ad5be Thomas Thrainer
          if not msg:
2211 763ad5be Thomas Thrainer
            msg = "disk not found"
2212 34ea8da3 Michele Tartara
          if not self._CheckDisksActivated(self.instance):
2213 34ea8da3 Michele Tartara
            extra_hint = ("\nDisks seem to be not properly activated. Try"
2214 34ea8da3 Michele Tartara
                          " running activate-disks on the instance before"
2215 34ea8da3 Michele Tartara
                          " using replace-disks.")
2216 34ea8da3 Michele Tartara
          else:
2217 34ea8da3 Michele Tartara
            extra_hint = ""
2218 34ea8da3 Michele Tartara
          raise errors.OpExecError("Can't find disk/%d on node %s: %s%s" %
2219 f9dfa8df Klaus Aehlig
                                   (idx, self.cfg.GetNodeName(node_uuid), msg,
2220 f9dfa8df Klaus Aehlig
                                    extra_hint))
2221 763ad5be Thomas Thrainer
2222 1c3231aa Thomas Thrainer
  def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
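    """Check that the disks to be replaced are consistent on the given node.

    """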
2223 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2224 763ad5be Thomas Thrainer
      if idx not in self.disks:
2225 763ad5be Thomas Thrainer
        continue
2226 763ad5be Thomas Thrainer
2227 763ad5be Thomas Thrainer
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
2228 1c3231aa Thomas Thrainer
                      (idx, self.cfg.GetNodeName(node_uuid)))
2229 763ad5be Thomas Thrainer
2230 1c3231aa Thomas Thrainer
      if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
2231 5eacbcae Thomas Thrainer
                                  on_primary, ldisk=ldisk):
2232 763ad5be Thomas Thrainer
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
2233 763ad5be Thomas Thrainer
                                 " replace disks for instance %s" %
2234 1c3231aa Thomas Thrainer
                                 (self.cfg.GetNodeName(node_uuid),
2235 1c3231aa Thomas Thrainer
                                  self.instance.name))
2236 763ad5be Thomas Thrainer
2237 1c3231aa Thomas Thrainer
  def _CreateNewStorage(self, node_uuid):
2238 763ad5be Thomas Thrainer
    """Create new storage on the primary or secondary node.
2239 763ad5be Thomas Thrainer

2240 763ad5be Thomas Thrainer
    This is only used for same-node replaces, not for changing the
2241 763ad5be Thomas Thrainer
    secondary node, hence we don't want to modify the existing disk.
2242 763ad5be Thomas Thrainer

2243 763ad5be Thomas Thrainer
    """
2244 763ad5be Thomas Thrainer
    iv_names = {}
2245 763ad5be Thomas Thrainer
2246 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2247 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2248 763ad5be Thomas Thrainer
      if idx not in self.disks:
2249 763ad5be Thomas Thrainer
        continue
2250 763ad5be Thomas Thrainer
2251 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding storage on %s for disk/%d",
2252 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(node_uuid), idx)
2253 763ad5be Thomas Thrainer
2254 1c3231aa Thomas Thrainer
      self.cfg.SetDiskID(dev, node_uuid)
2255 763ad5be Thomas Thrainer
2256 763ad5be Thomas Thrainer
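      # Each DRBD8 disk is backed by a data LV and a metadata LV; generate
      # fresh unique names for the replacement pair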
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
2257 763ad5be Thomas Thrainer
      names = _GenerateUniqueNames(self.lu, lv_names)
2258 763ad5be Thomas Thrainer
2259 763ad5be Thomas Thrainer
      (data_disk, meta_disk) = dev.children
2260 763ad5be Thomas Thrainer
      vg_data = data_disk.logical_id[0]
2261 cd3b4ff4 Helga Velroyen
      lv_data = objects.Disk(dev_type=constants.DT_PLAIN, size=dev.size,
2262 763ad5be Thomas Thrainer
                             logical_id=(vg_data, names[0]),
2263 763ad5be Thomas Thrainer
                             params=data_disk.params)
2264 763ad5be Thomas Thrainer
      vg_meta = meta_disk.logical_id[0]
2265 cd3b4ff4 Helga Velroyen
      lv_meta = objects.Disk(dev_type=constants.DT_PLAIN,
2266 763ad5be Thomas Thrainer
                             size=constants.DRBD_META_SIZE,
2267 763ad5be Thomas Thrainer
                             logical_id=(vg_meta, names[1]),
2268 763ad5be Thomas Thrainer
                             params=meta_disk.params)
2269 763ad5be Thomas Thrainer
2270 763ad5be Thomas Thrainer
      new_lvs = [lv_data, lv_meta]
2271 763ad5be Thomas Thrainer
      old_lvs = [child.Copy() for child in dev.children]
2272 763ad5be Thomas Thrainer
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
2273 1c3231aa Thomas Thrainer
      excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
2274 763ad5be Thomas Thrainer
2275 763ad5be Thomas Thrainer
      # we pass force_create=True to force the LVM creation
2276 763ad5be Thomas Thrainer
      for new_lv in new_lvs:
2277 f2b58d93 Thomas Thrainer
        try:
2278 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
2279 f2b58d93 Thomas Thrainer
                               GetInstanceInfoText(self.instance), False,
2280 f2b58d93 Thomas Thrainer
                               excl_stor)
2281 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2282 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2283 763ad5be Thomas Thrainer
2284 763ad5be Thomas Thrainer
    return iv_names
2285 763ad5be Thomas Thrainer
2286 1c3231aa Thomas Thrainer
  def _CheckDevices(self, node_uuid, iv_names):
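    """Check that the DRBD devices are found and not degraded.

    """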
2287 763ad5be Thomas Thrainer
    for name, (dev, _, _) in iv_names.iteritems():
2288 1c3231aa Thomas Thrainer
      self.cfg.SetDiskID(dev, node_uuid)
2289 763ad5be Thomas Thrainer
2290 1c3231aa Thomas Thrainer
      result = _BlockdevFind(self, node_uuid, dev, self.instance)
2291 763ad5be Thomas Thrainer
2292 763ad5be Thomas Thrainer
      msg = result.fail_msg
2293 763ad5be Thomas Thrainer
      if msg or not result.payload:
2294 763ad5be Thomas Thrainer
        if not msg:
2295 763ad5be Thomas Thrainer
          msg = "disk not found"
2296 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
2297 763ad5be Thomas Thrainer
                                 (name, msg))
2298 763ad5be Thomas Thrainer
2299 763ad5be Thomas Thrainer
      if result.payload.is_degraded:
2300 763ad5be Thomas Thrainer
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
2301 763ad5be Thomas Thrainer
2302 1c3231aa Thomas Thrainer
  def _RemoveOldStorage(self, node_uuid, iv_names):
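    """Remove the old (replaced) LVs, only warning if removal fails.

    """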
2303 763ad5be Thomas Thrainer
    for name, (_, old_lvs, _) in iv_names.iteritems():
2304 763ad5be Thomas Thrainer
      self.lu.LogInfo("Remove logical volumes for %s", name)
2305 763ad5be Thomas Thrainer
2306 763ad5be Thomas Thrainer
      for lv in old_lvs:
2307 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(lv, node_uuid)
2308 763ad5be Thomas Thrainer
2309 1c3231aa Thomas Thrainer
        msg = self.rpc.call_blockdev_remove(node_uuid, lv).fail_msg
2310 763ad5be Thomas Thrainer
        if msg:
2311 763ad5be Thomas Thrainer
          self.lu.LogWarning("Can't remove old LV: %s", msg,
2312 763ad5be Thomas Thrainer
                             hint="remove unused LVs manually")
2313 763ad5be Thomas Thrainer
2314 763ad5be Thomas Thrainer
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
2315 763ad5be Thomas Thrainer
    """Replace a disk on the primary or secondary for DRBD 8.
2316 763ad5be Thomas Thrainer

2317 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2318 763ad5be Thomas Thrainer

2319 763ad5be Thomas Thrainer
      1. for each disk to be replaced:
2320 763ad5be Thomas Thrainer

2321 763ad5be Thomas Thrainer
        1. create new LVs on the target node with unique names
2322 763ad5be Thomas Thrainer
        1. detach old LVs from the drbd device
2323 763ad5be Thomas Thrainer
        1. rename old LVs to name_replaced.<time_t>
2324 763ad5be Thomas Thrainer
        1. rename new LVs to old LVs
2325 763ad5be Thomas Thrainer
        1. attach the new LVs (with the old names now) to the drbd device
2326 763ad5be Thomas Thrainer

2327 763ad5be Thomas Thrainer
      1. wait for sync across all devices
2328 763ad5be Thomas Thrainer

2329 763ad5be Thomas Thrainer
      1. for each modified disk:
2330 763ad5be Thomas Thrainer

2331 763ad5be Thomas Thrainer
        1. remove old LVs (which have the name name_replaced.<time_t>)
2332 763ad5be Thomas Thrainer

2333 763ad5be Thomas Thrainer
    Failures are not very well handled.
2334 763ad5be Thomas Thrainer

2335 763ad5be Thomas Thrainer
    """
2336 763ad5be Thomas Thrainer
    steps_total = 6
2337 763ad5be Thomas Thrainer
2338 763ad5be Thomas Thrainer
    # Step: check device activation
2339 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2340 1c3231aa Thomas Thrainer
    self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
2341 1c3231aa Thomas Thrainer
    self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
2342 763ad5be Thomas Thrainer
2343 763ad5be Thomas Thrainer
    # Step: check other node consistency
2344 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2345 1c3231aa Thomas Thrainer
    self._CheckDisksConsistency(
2346 1c3231aa Thomas Thrainer
      self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
2347 1c3231aa Thomas Thrainer
      False)
2348 763ad5be Thomas Thrainer
2349 763ad5be Thomas Thrainer
    # Step: create new storage
2350 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2351 1c3231aa Thomas Thrainer
    iv_names = self._CreateNewStorage(self.target_node_uuid)
2352 763ad5be Thomas Thrainer
2353 763ad5be Thomas Thrainer
    # Step: for each lv, detach+rename*2+attach
2354 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2355 763ad5be Thomas Thrainer
    for dev, old_lvs, new_lvs in iv_names.itervalues():
2356 763ad5be Thomas Thrainer
      self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
2357 763ad5be Thomas Thrainer
2358 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_removechildren(self.target_node_uuid, dev,
2359 763ad5be Thomas Thrainer
                                                     old_lvs)
2360 763ad5be Thomas Thrainer
      result.Raise("Can't detach drbd from local storage on node"
2361 1c3231aa Thomas Thrainer
                   " %s for device %s" %
2362 1c3231aa Thomas Thrainer
                   (self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
2363 763ad5be Thomas Thrainer
      #dev.children = []
2364 763ad5be Thomas Thrainer
      #cfg.Update(instance)
2365 763ad5be Thomas Thrainer
2366 763ad5be Thomas Thrainer
      # ok, we created the new LVs, so now we know we have the needed
2367 763ad5be Thomas Thrainer
      # storage; as such, we proceed on the target node to rename
2368 763ad5be Thomas Thrainer
      # old_lv to _old, and new_lv to old_lv; note that we rename LVs
2369 763ad5be Thomas Thrainer
      # using the assumption that logical_id == physical_id (which in
2370 763ad5be Thomas Thrainer
      # turn is the unique_id on that node)
2371 763ad5be Thomas Thrainer
2372 763ad5be Thomas Thrainer
      # FIXME(iustin): use a better name for the replaced LVs
2373 763ad5be Thomas Thrainer
      temp_suffix = int(time.time())
2374 763ad5be Thomas Thrainer
      ren_fn = lambda d, suff: (d.physical_id[0],
2375 763ad5be Thomas Thrainer
                                d.physical_id[1] + "_replaced-%s" % suff)
2376 763ad5be Thomas Thrainer
2377 763ad5be Thomas Thrainer
      # Build the rename list based on what LVs exist on the node
2378 763ad5be Thomas Thrainer
      rename_old_to_new = []
2379 763ad5be Thomas Thrainer
      for to_ren in old_lvs:
2380 1c3231aa Thomas Thrainer
        result = self.rpc.call_blockdev_find(self.target_node_uuid, to_ren)
2381 763ad5be Thomas Thrainer
        if not result.fail_msg and result.payload:
2382 763ad5be Thomas Thrainer
          # device exists
2383 763ad5be Thomas Thrainer
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
2384 763ad5be Thomas Thrainer
2385 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the old LVs on the target node")
2386 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2387 763ad5be Thomas Thrainer
                                             rename_old_to_new)
2388 1c3231aa Thomas Thrainer
      result.Raise("Can't rename old LVs on node %s" %
2389 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2390 763ad5be Thomas Thrainer
2391 763ad5be Thomas Thrainer
      # Now we rename the new LVs to the old LVs
2392 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the new LVs on the target node")
2393 763ad5be Thomas Thrainer
      rename_new_to_old = [(new, old.physical_id)
2394 763ad5be Thomas Thrainer
                           for old, new in zip(old_lvs, new_lvs)]
2395 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2396 763ad5be Thomas Thrainer
                                             rename_new_to_old)
2397 1c3231aa Thomas Thrainer
      result.Raise("Can't rename new LVs on node %s" %
2398 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2399 763ad5be Thomas Thrainer
2400 763ad5be Thomas Thrainer
      # Intermediate steps of in memory modifications
2401 763ad5be Thomas Thrainer
      for old, new in zip(old_lvs, new_lvs):
2402 763ad5be Thomas Thrainer
        new.logical_id = old.logical_id
2403 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(new, self.target_node_uuid)
2404 763ad5be Thomas Thrainer
2405 763ad5be Thomas Thrainer
      # We need to modify old_lvs so that removal later removes the
2406 763ad5be Thomas Thrainer
      # right LVs, not the newly added ones; note that old_lvs is a
2407 763ad5be Thomas Thrainer
      # copy here
2408 763ad5be Thomas Thrainer
      for disk in old_lvs:
2409 763ad5be Thomas Thrainer
        disk.logical_id = ren_fn(disk, temp_suffix)
2410 1c3231aa Thomas Thrainer
        self.cfg.SetDiskID(disk, self.target_node_uuid)
2411 763ad5be Thomas Thrainer
2412 763ad5be Thomas Thrainer
      # Now that the new lvs have the old name, we can add them to the device
2413 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding new mirror component on %s",
2414 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(self.target_node_uuid))
2415 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
2416 763ad5be Thomas Thrainer
                                                  (dev, self.instance), new_lvs)
2417 763ad5be Thomas Thrainer
      msg = result.fail_msg
2418 763ad5be Thomas Thrainer
      if msg:
2419 763ad5be Thomas Thrainer
        for new_lv in new_lvs:
2420 1c3231aa Thomas Thrainer
          msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
2421 763ad5be Thomas Thrainer
                                               new_lv).fail_msg
2422 763ad5be Thomas Thrainer
          if msg2:
2423 763ad5be Thomas Thrainer
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
2424 763ad5be Thomas Thrainer
                               hint=("cleanup manually the unused logical"
2425 763ad5be Thomas Thrainer
                                     "volumes"))
2426 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
2427 763ad5be Thomas Thrainer
2428 763ad5be Thomas Thrainer
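    # The remaining steps are numbered from 5; their order depends on whether
    # the old storage is removed before (early release) or after the resync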
    cstep = itertools.count(5)
2429 763ad5be Thomas Thrainer
2430 763ad5be Thomas Thrainer
    if self.early_release:
2431 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2432 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2433 763ad5be Thomas Thrainer
      # TODO: Check if releasing locks early still makes sense
2434 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
2435 763ad5be Thomas Thrainer
    else:
2436 763ad5be Thomas Thrainer
      # Release all resource locks except those used by the instance
2437 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
2438 5eacbcae Thomas Thrainer
                   keep=self.node_secondary_ip.keys())
2439 763ad5be Thomas Thrainer
2440 763ad5be Thomas Thrainer
    # Release all node locks while waiting for sync
2441 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE)
2442 763ad5be Thomas Thrainer
2443 763ad5be Thomas Thrainer
    # TODO: Can the instance lock be downgraded here? Take the optional disk
2444 763ad5be Thomas Thrainer
    # shutdown in the caller into consideration.
2445 763ad5be Thomas Thrainer
2446 763ad5be Thomas Thrainer
    # Wait for sync
2447 763ad5be Thomas Thrainer
    # This can fail as the old devices are degraded and _WaitForSync
2448 763ad5be Thomas Thrainer
    # does a combined result over all disks, so we don't check its return value
2449 763ad5be Thomas Thrainer
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
2450 5eacbcae Thomas Thrainer
    WaitForSync(self.lu, self.instance)
2451 763ad5be Thomas Thrainer
2452 763ad5be Thomas Thrainer
    # Check all devices manually
2453 763ad5be Thomas Thrainer
    self._CheckDevices(self.instance.primary_node, iv_names)
2454 763ad5be Thomas Thrainer
2455 763ad5be Thomas Thrainer
    # Step: remove old storage
2456 763ad5be Thomas Thrainer
    if not self.early_release:
2457 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2458 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2459 763ad5be Thomas Thrainer
2460 763ad5be Thomas Thrainer
  def _ExecDrbd8Secondary(self, feedback_fn):
2461 763ad5be Thomas Thrainer
    """Replace the secondary node for DRBD 8.
2462 763ad5be Thomas Thrainer

2463 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2464 763ad5be Thomas Thrainer
      - for all disks of the instance:
2465 763ad5be Thomas Thrainer
        - create new LVs on the new node with same names
2466 763ad5be Thomas Thrainer
        - shutdown the drbd device on the old secondary
2467 763ad5be Thomas Thrainer
        - disconnect the drbd network on the primary
2468 763ad5be Thomas Thrainer
        - create the drbd device on the new secondary
2469 763ad5be Thomas Thrainer
        - network attach the drbd on the primary, using an artifice:
2470 763ad5be Thomas Thrainer
          the drbd code for Attach() will connect to the network if it
2471 763ad5be Thomas Thrainer
          finds a device which is connected to the good local disks but
2472 763ad5be Thomas Thrainer
          not network enabled
2473 763ad5be Thomas Thrainer
      - wait for sync across all devices
2474 763ad5be Thomas Thrainer
      - remove all disks from the old secondary
2475 763ad5be Thomas Thrainer

2476 763ad5be Thomas Thrainer
    Failures are not very well handled.
2477 763ad5be Thomas Thrainer

2478 763ad5be Thomas Thrainer
    """
2479 763ad5be Thomas Thrainer
    steps_total = 6
2480 763ad5be Thomas Thrainer
2481 763ad5be Thomas Thrainer
    pnode = self.instance.primary_node
2482 763ad5be Thomas Thrainer
2483 763ad5be Thomas Thrainer
    # Step: check device activation
2484 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2485 763ad5be Thomas Thrainer
    self._CheckDisksExistence([self.instance.primary_node])
2486 763ad5be Thomas Thrainer
    self._CheckVolumeGroup([self.instance.primary_node])
2487 763ad5be Thomas Thrainer
2488 763ad5be Thomas Thrainer
    # Step: check other node consistency
2489 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2490 763ad5be Thomas Thrainer
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
2491 763ad5be Thomas Thrainer
2492 763ad5be Thomas Thrainer
    # Step: create new storage
2493 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2494 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2495 1c3231aa Thomas Thrainer
    excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
2496 1c3231aa Thomas Thrainer
                                                  self.new_node_uuid)
2497 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2498 763ad5be Thomas Thrainer
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
2499 1c3231aa Thomas Thrainer
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
2500 763ad5be Thomas Thrainer
      # we pass force_create=True to force LVM creation
2501 763ad5be Thomas Thrainer
      for new_lv in dev.children:
2502 f2b58d93 Thomas Thrainer
        try:
2503 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
2504 dad226e3 Thomas Thrainer
                               new_lv, True, GetInstanceInfoText(self.instance),
2505 dad226e3 Thomas Thrainer
                               False, excl_stor)
2506 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2507 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2508 763ad5be Thomas Thrainer
2509 763ad5be Thomas Thrainer
    # Step 4: drbd minors and drbd setup changes
2510 763ad5be Thomas Thrainer
    # after this, we must manually remove the drbd minors on both the
2511 763ad5be Thomas Thrainer
    # error and the success paths
2512 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2513 1c3231aa Thomas Thrainer
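    # Allocate one new DRBD minor on the new node for each instance disk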
    minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
2514 1c3231aa Thomas Thrainer
                                         for _ in self.instance.disks],
2515 da4a52a3 Thomas Thrainer
                                        self.instance.uuid)
    logging.debug("Allocated minors %r", minors)

    iv_names = {}
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
      self.lu.LogInfo("Activating a new drbd on %s for disk/%d" %
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
      # create new devices on new_node; note that we create two IDs:
      # one without port, so the drbd will be activated without
      # networking information on the new node at this stage, and one
      # with network, for the later activation in step 4
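      # For illustration only (values below are hypothetical), a DRBD8
      # logical_id is a 6-tuple of the form
      #   (node_A_uuid, node_B_uuid, port, minor_A, minor_B, secret);
      # new_alone_id keeps the primary's minor but has None in the port
      # slot, while new_net_id carries the original port for the later
      # network attach.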
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
      if self.instance.primary_node == o_node1:
        p_minor = o_minor1
      else:
        assert self.instance.primary_node == o_node2, "Three-node instance?"
        p_minor = o_minor2

      new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
                      p_minor, new_minor, o_secret)
      new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
                    p_minor, new_minor, o_secret)

      iv_names[idx] = (dev, dev.children, new_net_id)
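      # iv_names maps disk index -> (disk object, LV children, networked
      # logical_id); _RemoveOldStorage and _CheckDevices consume it later.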
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.DT_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size,
                              params={})
      (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
                                            self.cfg)
      try:
        CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
                             anno_new_drbd,
                             GetInstanceInfoText(self.instance), False,
                             excl_stor)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise

    # We have new devices, shut down the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
      self.cfg.SetDiskID(dev, self.target_node_uuid)
      msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                            (dev, self.instance)).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shut down drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please clean up this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net([pnode], self.node_secondary_ip,
                                               self.instance.disks)[pnode]
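    # In standalone mode the primary's drbds no longer talk to the old
    # secondary, so they can be re-attached to the new one further down.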

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id
      self.cfg.SetDiskID(dev, self.instance.primary_node)

    self.cfg.Update(self.instance, feedback_fn)

    # Release all node locks (the configuration has been updated)
    ReleaseLocks(self.lu, locking.LEVEL_NODE)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node_uuid],
                                           self.node_secondary_ip,
                                           (self.instance.disks, self.instance),
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           self.cfg.GetNodeName(to_node), msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    cstep = itertools.count(5)
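    # Steps 5 and 6 are "remove old storage" and "sync devices"; with
    # early_release the removal happens before the sync, otherwise after it,
    # so a counter keeps the reported step numbers consistent either way.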

    if self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
      # TODO: Check if releasing locks early still makes sense
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
    else:
      # Release all resource locks except those used by the instance
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
                   keep=self.node_secondary_ip.keys())

    # TODO: Can the instance lock be downgraded here? Take the optional disk
    # shutdown in the caller into consideration.

    # Wait for sync
    # This can fail as the old devices are degraded and _WaitForSync
    # does a combined result over all disks, so we don't check its return value
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
    WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)