
root / lib / cmdlib / instance_storage.py @ 4869595d


#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Logical units dealing with storage of instances."""

import itertools
import logging
import os
import time

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
import ganeti.rpc.node as rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
  AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
  CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
  IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
  CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
  BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy

import ganeti.masterd.instance


_DISK_TEMPLATE_NAME_PREFIX = {
  constants.DT_PLAIN: "",
  constants.DT_RBD: ".rbd",
  constants.DT_EXT: ".ext",
  }

def CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor):
  """Create a single block device on a given node.

  This will not recurse over children of the device, so they must be
  created in advance.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  """
  result = lu.rpc.call_blockdev_create(node_uuid, (device, instance),
                                       device.size, instance.name, force_open,
                                       info, excl_stor)
  result.Raise("Can't create block device %s on"
               " node %s for instance %s" % (device,
                                             lu.cfg.GetNodeName(node_uuid),
                                             instance.name))

def _CreateBlockDevInner(lu, node_uuid, instance, device, force_create,
                         info, force_open, excl_stor):
  """Create a tree of block devices on a given node.

  If this device type has to be created on secondaries, create it and
  all its children.

  If not, just recurse to children keeping the same 'force' value.

  @attention: The device has to be annotated already.

  @param lu: the lu on whose behalf we execute
  @param node_uuid: the node on which to create the device
  @type instance: L{objects.Instance}
  @param instance: the instance which owns the device
  @type device: L{objects.Disk}
  @param device: the device to create
  @type force_create: boolean
  @param force_create: whether to force creation of this device; this
      will be changed to True whenever we find a device for which
      CreateOnSecondary() returns True
  @param info: the extra 'metadata' we should attach to the device
      (this will be represented as an LVM tag)
  @type force_open: boolean
  @param force_open: this parameter will be passed to the
      L{backend.BlockdevCreate} function where it specifies
      whether we run on primary or not, and it affects both
      the child assembly and the device's own Open() execution
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active for the node

  @return: list of created devices
  """
  created_devices = []
  try:
    if device.CreateOnSecondary():
      force_create = True

    if device.children:
      for child in device.children:
        devs = _CreateBlockDevInner(lu, node_uuid, instance, child,
                                    force_create, info, force_open, excl_stor)
        created_devices.extend(devs)

    if not force_create:
      return created_devices

    CreateSingleBlockDev(lu, node_uuid, instance, device, info, force_open,
                         excl_stor)
    # The device has been completely created, so there is no point in keeping
    # its subdevices in the list. We just add the device itself instead.
    created_devices = [(node_uuid, device)]
    return created_devices

  except errors.DeviceCreationError, e:
    e.created_devices.extend(created_devices)
    raise e
  except errors.OpExecError, e:
    raise errors.DeviceCreationError(str(e), created_devices)

def IsExclusiveStorageEnabledNodeUuid(cfg, node_uuid):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node_uuid: string
  @param node_uuid: The node UUID
  @rtype: bool
  @return: The effective value of exclusive_storage
  @raise errors.OpPrereqError: if no node exists with the given UUID

  """
  ni = cfg.GetNodeInfo(node_uuid)
  if ni is None:
    raise errors.OpPrereqError("Invalid node UUID %s" % node_uuid,
                               errors.ECODE_NOENT)
  return IsExclusiveStorageEnabledNode(cfg, ni)

def _CreateBlockDev(lu, node_uuid, instance, device, force_create, info,
                    force_open):
  """Wrapper around L{_CreateBlockDevInner}.

  This method annotates the root device first.

  """
  (disk,) = AnnotateDiskParams(instance, [device], lu.cfg)
  excl_stor = IsExclusiveStorageEnabledNodeUuid(lu.cfg, node_uuid)
  return _CreateBlockDevInner(lu, node_uuid, instance, disk, force_create, info,
                              force_open, excl_stor)
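
# Editorial note, not part of the upstream module: disk creation flows
# top-down through this file.  CreateDisks() walks an instance's disks and
# nodes and calls _CreateBlockDev() for each (node, disk) pair; the wrapper
# annotates the disk parameters and recurses via _CreateBlockDevInner() until
# CreateSingleBlockDev() issues the actual blockdev_create RPC.  A minimal
# sketch of one call, assuming a LogicalUnit `lu`, a node UUID `node_uuid`
# and a plain LVM L{objects.Disk} `disk` obtained elsewhere:
#
#   _CreateBlockDev(lu, node_uuid, instance, disk,
#                   force_create=True, info=GetInstanceInfoText(instance),
#                   force_open=True)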


def _UndoCreateDisks(lu, disks_created, instance):
  """Undo the work performed by L{CreateDisks}.

  This function is called in case of an error to undo the work of
  L{CreateDisks}.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param disks_created: the result returned by L{CreateDisks}
  @type instance: L{objects.Instance}
  @param instance: the instance for which disks were created

  """
  for (node_uuid, disk) in disks_created:
    result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
    result.Warn("Failed to remove newly-created disk %s on node %s" %
                (disk, lu.cfg.GetNodeName(node_uuid)), logging.warning)

def CreateDisks(lu, instance, to_skip=None, target_node_uuid=None, disks=None):
  """Create all disks for an instance.

  This abstracts away some work from AddInstance.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should create
  @type to_skip: list
  @param to_skip: list of indices to skip
  @type target_node_uuid: string
  @param target_node_uuid: if passed, overrides the target node for creation
  @type disks: list of L{objects.Disk}
  @param disks: the disks to create; if not specified, all the disks of the
      instance are created
  @return: information about the created disks, to be used to call
      L{_UndoCreateDisks}
  @raise errors.OpPrereqError: in case of error

  """
  info = GetInstanceInfoText(instance)
  if target_node_uuid is None:
    pnode_uuid = instance.primary_node
    all_node_uuids = instance.all_nodes
  else:
    pnode_uuid = target_node_uuid
    all_node_uuids = [pnode_uuid]

  if disks is None:
    disks = instance.disks

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)

    result.Raise("Failed to create directory '%s' on"
                 " node %s" % (file_storage_dir,
                               lu.cfg.GetNodeName(pnode_uuid)))

  disks_created = []
  for idx, device in enumerate(disks):
    if to_skip and idx in to_skip:
      continue
    logging.info("Creating disk %s for instance '%s'", idx, instance.name)
    for node_uuid in all_node_uuids:
      f_create = node_uuid == pnode_uuid
      try:
        _CreateBlockDev(lu, node_uuid, instance, device, f_create, info,
                        f_create)
        disks_created.append((node_uuid, device))
      except errors.DeviceCreationError, e:
        logging.warning("Creating disk %s for instance '%s' failed",
                        idx, instance.name)
        disks_created.extend(e.created_devices)
        _UndoCreateDisks(lu, disks_created, instance)
        raise errors.OpExecError(e.message)
  return disks_created
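
# Editorial example, not part of the upstream sources: CreateDisks() returns
# the list of (node_uuid, disk) pairs it created, which is exactly the shape
# _UndoCreateDisks() expects, so a caller can roll back its own work on a
# later failure.  A hedged sketch:
#
#   disks_created = CreateDisks(lu, instance)
#   try:
#     pass  # further setup that may fail
#   except errors.OpExecError:
#     _UndoCreateDisks(lu, disks_created, instance)
#     raise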


def ComputeDiskSizePerVG(disk_template, disks):
  """Compute disk size requirements in the volume group.

  """
  def _compute(disks, payload):
    """Universal algorithm.

    """
    vgs = {}
    for disk in disks:
      # accumulate the requirement per volume group named by each disk
      vgs[disk[constants.IDISK_VG]] = \
        vgs.get(disk[constants.IDISK_VG], 0) + \
        disk[constants.IDISK_SIZE] + payload

    return vgs

  # Required free disk space as a function of disk and swap space
  req_size_dict = {
    constants.DT_DISKLESS: {},
    constants.DT_PLAIN: _compute(disks, 0),
    # 128 MB are added for drbd metadata for each disk
    constants.DT_DRBD8: _compute(disks, constants.DRBD_META_SIZE),
    constants.DT_FILE: {},
    constants.DT_SHARED_FILE: {},
    constants.DT_GLUSTER: {},
    }

  if disk_template not in req_size_dict:
    raise errors.ProgrammerError("Disk template '%s' size requirement"
                                 " is unknown" % disk_template)

  return req_size_dict[disk_template]
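
# Editorial example, not part of the upstream sources; values are made up.
# For two DRBD disks on different data volume groups, each disk contributes
# its size plus DRBD_META_SIZE to its own volume group:
#
#   ComputeDiskSizePerVG(
#     constants.DT_DRBD8,
#     [{constants.IDISK_VG: "xenvg", constants.IDISK_SIZE: 1024},
#      {constants.IDISK_VG: "fastvg", constants.IDISK_SIZE: 512}])
#   # => {"xenvg": 1024 + constants.DRBD_META_SIZE,
#   #     "fastvg": 512 + constants.DRBD_META_SIZE}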


def ComputeDisks(op, default_vg):
  """Computes the instance disks.

  @param op: The instance opcode
  @param default_vg: The default_vg to assume

  @return: The computed disks

  """
  disks = []
  for disk in op.disks:
    mode = disk.get(constants.IDISK_MODE, constants.DISK_RDWR)
    if mode not in constants.DISK_ACCESS_SET:
      raise errors.OpPrereqError("Invalid disk access mode '%s'" %
                                 mode, errors.ECODE_INVAL)
    size = disk.get(constants.IDISK_SIZE, None)
    if size is None:
      raise errors.OpPrereqError("Missing disk size", errors.ECODE_INVAL)
    try:
      size = int(size)
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid disk size '%s'" % size,
                                 errors.ECODE_INVAL)

    ext_provider = disk.get(constants.IDISK_PROVIDER, None)
    if ext_provider and op.disk_template != constants.DT_EXT:
      raise errors.OpPrereqError("The '%s' option is only valid for the %s"
                                 " disk template, not %s" %
                                 (constants.IDISK_PROVIDER, constants.DT_EXT,
                                  op.disk_template), errors.ECODE_INVAL)

    data_vg = disk.get(constants.IDISK_VG, default_vg)
    name = disk.get(constants.IDISK_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    new_disk = {
      constants.IDISK_SIZE: size,
      constants.IDISK_MODE: mode,
      constants.IDISK_VG: data_vg,
      constants.IDISK_NAME: name,
      }

    for key in [
      constants.IDISK_METAVG,
      constants.IDISK_ADOPT,
      constants.IDISK_SPINDLES,
      ]:
      if key in disk:
        new_disk[key] = disk[key]

    # For extstorage, demand the `provider' option and add any
    # additional parameters (ext-params) to the dict
    if op.disk_template == constants.DT_EXT:
      if ext_provider:
        new_disk[constants.IDISK_PROVIDER] = ext_provider
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            new_disk[key] = disk[key]
      else:
        raise errors.OpPrereqError("Missing provider for template '%s'" %
                                   constants.DT_EXT, errors.ECODE_INVAL)

    disks.append(new_disk)

  return disks
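
# Editorial example, not part of the upstream sources; values are made up.
# For an opcode whose disks were given as a single 10 GiB entry on a
# non-default volume group,
#
#   op.disks == [{constants.IDISK_SIZE: 10240, constants.IDISK_VG: "fastvg"}]
#
# ComputeDisks(op, default_vg="xenvg") would return roughly
#
#   [{constants.IDISK_SIZE: 10240, constants.IDISK_MODE: constants.DISK_RDWR,
#     constants.IDISK_VG: "fastvg", constants.IDISK_NAME: None}]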


def CheckRADOSFreeSpace():
  """Compute disk size requirements inside the RADOS cluster.

  """
  # For the RADOS cluster we assume there is always enough space.
  pass


def _GenerateDRBD8Branch(lu, primary_uuid, secondary_uuid, size, vgnames, names,
                         iv_name, p_minor, s_minor):
  """Generate a drbd8 device complete with its children.

  """
  assert len(vgnames) == len(names) == 2
  port = lu.cfg.AllocatePort()
  shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())

  dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
                          logical_id=(vgnames[0], names[0]),
                          params={})
  dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
                          size=constants.DRBD_META_SIZE,
                          logical_id=(vgnames[1], names[1]),
                          params={})
  dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
                          logical_id=(primary_uuid, secondary_uuid, port,
                                      p_minor, s_minor,
                                      shared_secret),
                          children=[dev_data, dev_meta],
                          iv_name=iv_name, params={})
  drbd_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
  return drbd_dev
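
# Editorial sketch, not part of the upstream module: the object returned
# above is a DRBD8 disk whose two children are the plain LVM data and
# metadata volumes, roughly:
#
#   Disk(DT_DRBD8, size=size,
#        logical_id=(primary_uuid, secondary_uuid, port, p_minor, s_minor,
#                    shared_secret),
#        children=[Disk(DT_PLAIN, size=size),              # <prefix>_data
#                  Disk(DT_PLAIN, size=DRBD_META_SIZE)])   # <prefix>_meta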


def GenerateDiskTemplate(
  lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
  disk_info, file_storage_dir, file_driver, base_index,
  feedback_fn, full_disk_params):
  """Generate the entire disk layout for a given template type.

  """
  vgname = lu.cfg.GetVGName()
  disk_count = len(disk_info)
  disks = []

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)

  if template_name == constants.DT_DISKLESS:
    pass
  elif template_name == constants.DT_DRBD8:
    if len(secondary_node_uuids) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
    remote_node_uuid = secondary_node_uuids[0]
    minors = lu.cfg.AllocateDRBDMinor(
      [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)

    (drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
                                                       full_disk_params)
    drbd_default_metavg = drbd_params[constants.LDP_DEFAULT_METAVG]

    names = []
    for lv_prefix in _GenerateUniqueNames(lu, [".disk%d" % (base_index + i)
                                               for i in range(disk_count)]):
      names.append(lv_prefix + "_data")
      names.append(lv_prefix + "_meta")
    for idx, disk in enumerate(disk_info):
      disk_index = idx + base_index
      data_vg = disk.get(constants.IDISK_VG, vgname)
      meta_vg = disk.get(constants.IDISK_METAVG, drbd_default_metavg)
      disk_dev = _GenerateDRBD8Branch(lu, primary_node_uuid, remote_node_uuid,
                                      disk[constants.IDISK_SIZE],
                                      [data_vg, meta_vg],
                                      names[idx * 2:idx * 2 + 2],
                                      "disk/%d" % disk_index,
                                      minors[idx * 2], minors[idx * 2 + 1])
      disk_dev.mode = disk[constants.IDISK_MODE]
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disks.append(disk_dev)
  else:
    if secondary_node_uuids:
      raise errors.ProgrammerError("Wrong template configuration")

    name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
    if name_prefix is None:
      names = None
    else:
      names = _GenerateUniqueNames(lu, ["%s.disk%s" %
                                        (name_prefix, base_index + i)
                                        for i in range(disk_count)])

    if template_name == constants.DT_PLAIN:

      def logical_id_fn(idx, _, disk):
        vg = disk.get(constants.IDISK_VG, vgname)
        return (vg, names[idx])

    elif template_name == constants.DT_GLUSTER:
      logical_id_fn = lambda _1, disk_index, _2: \
        (file_driver, "ganeti/%s.%d" % (instance_uuid,
                                        disk_index))

    elif template_name in constants.DTS_FILEBASED: # Gluster handled above
      logical_id_fn = \
        lambda _, disk_index, disk: (file_driver,
                                     "%s/disk%d" % (file_storage_dir,
                                                    disk_index))
    elif template_name == constants.DT_BLOCK:
      logical_id_fn = \
        lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
                                       disk[constants.IDISK_ADOPT])
    elif template_name == constants.DT_RBD:
      logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
    elif template_name == constants.DT_EXT:
      def logical_id_fn(idx, _, disk):
        provider = disk.get(constants.IDISK_PROVIDER, None)
        if provider is None:
          raise errors.ProgrammerError("Disk template is %s, but '%s' is"
                                       " not found", constants.DT_EXT,
                                       constants.IDISK_PROVIDER)
        return (provider, names[idx])
    else:
      raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)

    dev_type = template_name

    for idx, disk in enumerate(disk_info):
      params = {}
      # Only for the Ext template add disk_info to params
      if template_name == constants.DT_EXT:
        params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
        for key in disk:
          if key not in constants.IDISK_PARAMS:
            params[key] = disk[key]
      disk_index = idx + base_index
      size = disk[constants.IDISK_SIZE]
      feedback_fn("* disk %s, size %s" %
                  (disk_index, utils.FormatUnit(size, "h")))
      disk_dev = objects.Disk(dev_type=dev_type, size=size,
                              logical_id=logical_id_fn(idx, disk_index, disk),
                              iv_name="disk/%d" % disk_index,
                              mode=disk[constants.IDISK_MODE],
                              params=params,
                              spindles=disk.get(constants.IDISK_SPINDLES))
      disk_dev.name = disk.get(constants.IDISK_NAME, None)
      disk_dev.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
      disks.append(disk_dev)

  return disks


def CheckSpindlesExclusiveStorage(diskdict, es_flag, required):
  """Check the presence of the spindle options with exclusive_storage.

  @type diskdict: dict
  @param diskdict: disk parameters
  @type es_flag: bool
  @param es_flag: the effective value of the exclusive_storage flag
  @type required: bool
  @param required: whether spindles are required or just optional
  @raise errors.OpPrereqError: when spindles are given but should not be, or
      are required but missing

  """
  if (not es_flag and constants.IDISK_SPINDLES in diskdict and
      diskdict[constants.IDISK_SPINDLES] is not None):
    raise errors.OpPrereqError("Spindles in instance disks cannot be specified"
                               " when exclusive storage is not active",
                               errors.ECODE_INVAL)
  if (es_flag and required and (constants.IDISK_SPINDLES not in diskdict or
                                diskdict[constants.IDISK_SPINDLES] is None)):
    raise errors.OpPrereqError("You must specify spindles in instance disks"
                               " when exclusive storage is active",
                               errors.ECODE_INVAL)
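
# Editorial examples, not part of the upstream sources: the two rejected
# combinations are spindles given while exclusive storage is off, and
# spindles missing while it is on and required:
#
#   CheckSpindlesExclusiveStorage({constants.IDISK_SPINDLES: 2}, False, False)
#   # -> raises OpPrereqError (spindles given, exclusive_storage inactive)
#
#   CheckSpindlesExclusiveStorage({}, True, True)
#   # -> raises OpPrereqError (spindles required but not given)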


class LUInstanceRecreateDisks(LogicalUnit):
  """Recreate an instance's missing disks.

  """
  HPATH = "instance-recreate-disks"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  _MODIFYABLE = compat.UniqueFrozenset([
    constants.IDISK_SIZE,
    constants.IDISK_MODE,
    constants.IDISK_SPINDLES,
    ])

  # New or changed disk parameters may have different semantics
  assert constants.IDISK_PARAMS == (_MODIFYABLE | frozenset([
    constants.IDISK_ADOPT,

    # TODO: Implement support for changing the VG while recreating
    constants.IDISK_VG,
    constants.IDISK_METAVG,
    constants.IDISK_PROVIDER,
    constants.IDISK_NAME,
    ]))

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    be_full = self.cfg.GetClusterInfo().FillBE(self.instance)

    # FIXME
    # The allocator should actually run in "relocate" mode, but current
    # allocators don't support relocating all the nodes of an instance at
    # the same time. As a workaround we use "allocate" mode, but this is
    # suboptimal for two reasons:
    # - The instance name passed to the allocator is present in the list of
    #   existing instances, so there could be a conflict within the
    #   internal structures of the allocator. This doesn't happen with the
    #   current allocators, but it's a liability.
    # - The allocator counts the resources used by the instance twice: once
    #   because the instance exists already, and once because it tries to
    #   allocate a new instance.
    # The allocator could choose some of the nodes on which the instance is
    # running, but that's not a problem. If the instance nodes are broken,
    # they should already be marked as drained or offline, and hence
    # skipped by the allocator. If instance disks have been lost for other
    # reasons, then recreating the disks on the same nodes should be fine.
    disk_template = self.instance.disk_template
    spindle_use = be_full[constants.BE_SPINDLE_USE]
    disks = [{
      constants.IDISK_SIZE: d.size,
      constants.IDISK_MODE: d.mode,
      constants.IDISK_SPINDLES: d.spindles,
      } for d in self.instance.disks]
    req = iallocator.IAReqInstanceAlloc(name=self.op.instance_name,
                                        disk_template=disk_template,
                                        tags=list(self.instance.GetTags()),
                                        os=self.instance.os,
                                        nics=[{}],
                                        vcpus=be_full[constants.BE_VCPUS],
                                        memory=be_full[constants.BE_MAXMEM],
                                        spindle_use=spindle_use,
                                        disks=disks,
                                        hypervisor=self.instance.hypervisor,
                                        node_whitelist=None)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    assert req.RequiredNodes() == len(self.instance.all_nodes)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
                                 " %s" % (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, ial.result)
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(self.op.nodes))

  def CheckArguments(self):
    if self.op.disks and ht.TNonNegativeInt(self.op.disks[0]):
      # Normalize and convert deprecated list of disk indices
      self.op.disks = [(idx, {}) for idx in sorted(frozenset(self.op.disks))]

    duplicates = utils.FindDuplicates(map(compat.fst, self.op.disks))
    if duplicates:
      raise errors.OpPrereqError("Some disks have been specified more than"
                                 " once: %s" % utils.CommaJoin(duplicates),
                                 errors.ECODE_INVAL)

    # We don't want _CheckIAllocatorOrNode selecting the default iallocator
    # when neither iallocator nor nodes are specified
    if self.op.iallocator or self.op.nodes:
      CheckIAllocatorOrNode(self, "iallocator", "nodes")

    for (idx, params) in self.op.disks:
      utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
      unsupported = frozenset(params.keys()) - self._MODIFYABLE
      if unsupported:
        raise errors.OpPrereqError("Parameters for disk %s try to change"
                                   " unmodifiable parameter(s): %s" %
                                   (idx, utils.CommaJoin(unsupported)),
                                   errors.ECODE_INVAL)

648 763ad5be Thomas Thrainer
  def ExpandNames(self):
649 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
650 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
651 763ad5be Thomas Thrainer
652 763ad5be Thomas Thrainer
    if self.op.nodes:
653 1c3231aa Thomas Thrainer
      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
654 1c3231aa Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = list(self.op.node_uuids)
655 763ad5be Thomas Thrainer
    else:
656 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = []
657 763ad5be Thomas Thrainer
      if self.op.iallocator:
658 763ad5be Thomas Thrainer
        # iallocator will select a new node in the same group
659 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
660 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
661 763ad5be Thomas Thrainer
662 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
663 763ad5be Thomas Thrainer
664 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
665 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODEGROUP:
666 763ad5be Thomas Thrainer
      assert self.op.iallocator is not None
667 763ad5be Thomas Thrainer
      assert not self.op.nodes
668 763ad5be Thomas Thrainer
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
669 763ad5be Thomas Thrainer
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
670 763ad5be Thomas Thrainer
      # Lock the primary group used by the instance optimistically; this
671 763ad5be Thomas Thrainer
      # requires going via the node before it's locked, requiring
672 763ad5be Thomas Thrainer
      # verification later on
673 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
674 da4a52a3 Thomas Thrainer
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)
675 763ad5be Thomas Thrainer
676 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE:
677 763ad5be Thomas Thrainer
      # If an allocator is used, then we lock all the nodes in the current
678 763ad5be Thomas Thrainer
      # instance group, as we don't know yet which ones will be selected;
679 763ad5be Thomas Thrainer
      # if we replace the nodes without using an allocator, locks are
680 763ad5be Thomas Thrainer
      # already declared in ExpandNames; otherwise, we need to lock all the
681 763ad5be Thomas Thrainer
      # instance nodes for disk re-creation
682 763ad5be Thomas Thrainer
      if self.op.iallocator:
683 763ad5be Thomas Thrainer
        assert not self.op.nodes
684 763ad5be Thomas Thrainer
        assert not self.needed_locks[locking.LEVEL_NODE]
685 763ad5be Thomas Thrainer
        assert len(self.owned_locks(locking.LEVEL_NODEGROUP)) == 1
686 763ad5be Thomas Thrainer
687 763ad5be Thomas Thrainer
        # Lock member nodes of the group of the primary node
688 763ad5be Thomas Thrainer
        for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
689 763ad5be Thomas Thrainer
          self.needed_locks[locking.LEVEL_NODE].extend(
690 763ad5be Thomas Thrainer
            self.cfg.GetNodeGroup(group_uuid).members)
691 763ad5be Thomas Thrainer
692 763ad5be Thomas Thrainer
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
693 763ad5be Thomas Thrainer
      elif not self.op.nodes:
694 763ad5be Thomas Thrainer
        self._LockInstancesNodes(primary_only=False)
695 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
696 763ad5be Thomas Thrainer
      # Copy node locks
697 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
698 5eacbcae Thomas Thrainer
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
699 763ad5be Thomas Thrainer
700 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
701 763ad5be Thomas Thrainer
    """Build hooks env.
702 763ad5be Thomas Thrainer

703 763ad5be Thomas Thrainer
    This runs on master, primary and secondary nodes of the instance.
704 763ad5be Thomas Thrainer

705 763ad5be Thomas Thrainer
    """
706 5eacbcae Thomas Thrainer
    return BuildInstanceHookEnvByObject(self, self.instance)
707 763ad5be Thomas Thrainer
708 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
709 763ad5be Thomas Thrainer
    """Build hooks nodes.
710 763ad5be Thomas Thrainer

711 763ad5be Thomas Thrainer
    """
712 763ad5be Thomas Thrainer
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
713 763ad5be Thomas Thrainer
    return (nl, nl)
714 763ad5be Thomas Thrainer
715 763ad5be Thomas Thrainer
  def CheckPrereq(self):
716 763ad5be Thomas Thrainer
    """Check prerequisites.
717 763ad5be Thomas Thrainer

718 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster and is not running.
719 763ad5be Thomas Thrainer

720 763ad5be Thomas Thrainer
    """
721 da4a52a3 Thomas Thrainer
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
722 763ad5be Thomas Thrainer
    assert instance is not None, \
723 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
724 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
725 1c3231aa Thomas Thrainer
      if len(self.op.node_uuids) != len(instance.all_nodes):
726 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Instance %s currently has %d nodes, but"
727 763ad5be Thomas Thrainer
                                   " %d replacement nodes were specified" %
728 763ad5be Thomas Thrainer
                                   (instance.name, len(instance.all_nodes),
729 1c3231aa Thomas Thrainer
                                    len(self.op.node_uuids)),
730 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
731 763ad5be Thomas Thrainer
      assert instance.disk_template != constants.DT_DRBD8 or \
732 1c3231aa Thomas Thrainer
             len(self.op.node_uuids) == 2
733 763ad5be Thomas Thrainer
      assert instance.disk_template != constants.DT_PLAIN or \
734 1c3231aa Thomas Thrainer
             len(self.op.node_uuids) == 1
735 1c3231aa Thomas Thrainer
      primary_node = self.op.node_uuids[0]
736 763ad5be Thomas Thrainer
    else:
737 763ad5be Thomas Thrainer
      primary_node = instance.primary_node
738 763ad5be Thomas Thrainer
    if not self.op.iallocator:
739 5eacbcae Thomas Thrainer
      CheckNodeOnline(self, primary_node)
740 763ad5be Thomas Thrainer
741 763ad5be Thomas Thrainer
    if instance.disk_template == constants.DT_DISKLESS:
742 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Instance '%s' has no disks" %
743 763ad5be Thomas Thrainer
                                 self.op.instance_name, errors.ECODE_INVAL)
744 763ad5be Thomas Thrainer
745 763ad5be Thomas Thrainer
    # Verify if node group locks are still correct
746 763ad5be Thomas Thrainer
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
747 763ad5be Thomas Thrainer
    if owned_groups:
748 763ad5be Thomas Thrainer
      # Node group locks are acquired only for the primary node (and only
749 763ad5be Thomas Thrainer
      # when the allocator is used)
750 da4a52a3 Thomas Thrainer
      CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
751 5eacbcae Thomas Thrainer
                              primary_only=True)
752 763ad5be Thomas Thrainer
753 763ad5be Thomas Thrainer
    # if we replace nodes *and* the old primary is offline, we don't
754 763ad5be Thomas Thrainer
    # check the instance state
755 763ad5be Thomas Thrainer
    old_pnode = self.cfg.GetNodeInfo(instance.primary_node)
756 1c3231aa Thomas Thrainer
    if not ((self.op.iallocator or self.op.node_uuids) and old_pnode.offline):
757 5eacbcae Thomas Thrainer
      CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
758 5eacbcae Thomas Thrainer
                         msg="cannot recreate disks")
759 763ad5be Thomas Thrainer
760 763ad5be Thomas Thrainer
    if self.op.disks:
761 763ad5be Thomas Thrainer
      self.disks = dict(self.op.disks)
762 763ad5be Thomas Thrainer
    else:
763 763ad5be Thomas Thrainer
      self.disks = dict((idx, {}) for idx in range(len(instance.disks)))
764 763ad5be Thomas Thrainer
765 763ad5be Thomas Thrainer
    maxidx = max(self.disks.keys())
766 763ad5be Thomas Thrainer
    if maxidx >= len(instance.disks):
767 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Invalid disk index '%s'" % maxidx,
768 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
769 763ad5be Thomas Thrainer
770 1c3231aa Thomas Thrainer
    if ((self.op.node_uuids or self.op.iallocator) and
771 763ad5be Thomas Thrainer
         sorted(self.disks.keys()) != range(len(instance.disks))):
772 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't recreate disks partially and"
773 763ad5be Thomas Thrainer
                                 " change the nodes at the same time",
774 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
775 763ad5be Thomas Thrainer
776 763ad5be Thomas Thrainer
    self.instance = instance
777 763ad5be Thomas Thrainer
778 763ad5be Thomas Thrainer
    if self.op.iallocator:
779 763ad5be Thomas Thrainer
      self._RunAllocator()
780 763ad5be Thomas Thrainer
      # Release unneeded node and node resource locks
781 1c3231aa Thomas Thrainer
      ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.node_uuids)
782 1c3231aa Thomas Thrainer
      ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.node_uuids)
783 5eacbcae Thomas Thrainer
      ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
784 763ad5be Thomas Thrainer
785 763ad5be Thomas Thrainer
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
786 763ad5be Thomas Thrainer
787 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
788 1c3231aa Thomas Thrainer
      node_uuids = self.op.node_uuids
789 3f3ea14c Bernardo Dal Seno
    else:
790 1c3231aa Thomas Thrainer
      node_uuids = instance.all_nodes
791 3f3ea14c Bernardo Dal Seno
    excl_stor = compat.any(
792 1c3231aa Thomas Thrainer
      rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids).values()
793 3f3ea14c Bernardo Dal Seno
      )
794 3f3ea14c Bernardo Dal Seno
    for new_params in self.disks.values():
795 7c848a6a Bernardo Dal Seno
      CheckSpindlesExclusiveStorage(new_params, excl_stor, False)
796 3f3ea14c Bernardo Dal Seno
797 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
798 763ad5be Thomas Thrainer
    """Recreate the disks.
799 763ad5be Thomas Thrainer

800 763ad5be Thomas Thrainer
    """
801 763ad5be Thomas Thrainer
    assert (self.owned_locks(locking.LEVEL_NODE) ==
802 763ad5be Thomas Thrainer
            self.owned_locks(locking.LEVEL_NODE_RES))
803 763ad5be Thomas Thrainer
804 763ad5be Thomas Thrainer
    to_skip = []
805 763ad5be Thomas Thrainer
    mods = [] # keeps track of needed changes
806 763ad5be Thomas Thrainer
807 d0d7d7cf Thomas Thrainer
    for idx, disk in enumerate(self.instance.disks):
808 763ad5be Thomas Thrainer
      try:
809 763ad5be Thomas Thrainer
        changes = self.disks[idx]
810 763ad5be Thomas Thrainer
      except KeyError:
811 763ad5be Thomas Thrainer
        # Disk should not be recreated
812 763ad5be Thomas Thrainer
        to_skip.append(idx)
813 763ad5be Thomas Thrainer
        continue
814 763ad5be Thomas Thrainer
815 763ad5be Thomas Thrainer
      # update secondaries for disks, if needed
816 cd3b4ff4 Helga Velroyen
      if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
817 763ad5be Thomas Thrainer
        # need to update the nodes and minors
818 1c3231aa Thomas Thrainer
        assert len(self.op.node_uuids) == 2
819 763ad5be Thomas Thrainer
        assert len(disk.logical_id) == 6 # otherwise disk internals
820 763ad5be Thomas Thrainer
                                         # have changed
821 763ad5be Thomas Thrainer
        (_, _, old_port, _, _, old_secret) = disk.logical_id
822 1c3231aa Thomas Thrainer
        new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
823 da4a52a3 Thomas Thrainer
                                                self.instance.uuid)
824 1c3231aa Thomas Thrainer
        new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
825 763ad5be Thomas Thrainer
                  new_minors[0], new_minors[1], old_secret)
826 763ad5be Thomas Thrainer
        assert len(disk.logical_id) == len(new_id)
827 763ad5be Thomas Thrainer
      else:
828 763ad5be Thomas Thrainer
        new_id = None
829 763ad5be Thomas Thrainer
830 763ad5be Thomas Thrainer
      mods.append((idx, new_id, changes))
831 763ad5be Thomas Thrainer
832 763ad5be Thomas Thrainer
    # now that we have passed all asserts above, we can apply the mods
833 763ad5be Thomas Thrainer
    # in a single run (to avoid partial changes)
834 763ad5be Thomas Thrainer
    for idx, new_id, changes in mods:
835 d0d7d7cf Thomas Thrainer
      disk = self.instance.disks[idx]
836 763ad5be Thomas Thrainer
      if new_id is not None:
837 cd3b4ff4 Helga Velroyen
        assert disk.dev_type == constants.DT_DRBD8
838 763ad5be Thomas Thrainer
        disk.logical_id = new_id
839 763ad5be Thomas Thrainer
      if changes:
840 763ad5be Thomas Thrainer
        disk.Update(size=changes.get(constants.IDISK_SIZE, None),
841 b54ecf12 Bernardo Dal Seno
                    mode=changes.get(constants.IDISK_MODE, None),
842 b54ecf12 Bernardo Dal Seno
                    spindles=changes.get(constants.IDISK_SPINDLES, None))
843 763ad5be Thomas Thrainer
844 763ad5be Thomas Thrainer
    # change primary node, if needed
845 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
846 d0d7d7cf Thomas Thrainer
      self.instance.primary_node = self.op.node_uuids[0]
847 763ad5be Thomas Thrainer
      self.LogWarning("Changing the instance's nodes, you will have to"
848 763ad5be Thomas Thrainer
                      " remove any disks left on the older nodes manually")
849 763ad5be Thomas Thrainer
850 1c3231aa Thomas Thrainer
    if self.op.node_uuids:
851 d0d7d7cf Thomas Thrainer
      self.cfg.Update(self.instance, feedback_fn)
852 763ad5be Thomas Thrainer
853 763ad5be Thomas Thrainer
    # All touched nodes must be locked
854 763ad5be Thomas Thrainer
    mylocks = self.owned_locks(locking.LEVEL_NODE)
855 d0d7d7cf Thomas Thrainer
    assert mylocks.issuperset(frozenset(self.instance.all_nodes))
856 d0d7d7cf Thomas Thrainer
    new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
857 a365b47f Bernardo Dal Seno
858 a365b47f Bernardo Dal Seno
    # TODO: Release node locks before wiping, or explain why it's not possible
859 a365b47f Bernardo Dal Seno
    if self.cfg.GetClusterInfo().prealloc_wipe_disks:
860 a365b47f Bernardo Dal Seno
      wipedisks = [(idx, disk, 0)
861 d0d7d7cf Thomas Thrainer
                   for (idx, disk) in enumerate(self.instance.disks)
862 a365b47f Bernardo Dal Seno
                   if idx not in to_skip]
863 d0d7d7cf Thomas Thrainer
      WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
864 d0d7d7cf Thomas Thrainer
                         cleanup=new_disks)
865 763ad5be Thomas Thrainer
866 763ad5be Thomas Thrainer
867 d90f0cb4 Helga Velroyen
def _PerformNodeInfoCall(lu, node_uuids, vg):
868 d90f0cb4 Helga Velroyen
  """Prepares the input and performs a node info call.
869 d90f0cb4 Helga Velroyen

870 d90f0cb4 Helga Velroyen
  @type lu: C{LogicalUnit}
871 d90f0cb4 Helga Velroyen
  @param lu: a logical unit from which we get configuration data
872 d90f0cb4 Helga Velroyen
  @type node_uuids: list of string
873 d90f0cb4 Helga Velroyen
  @param node_uuids: list of node UUIDs to perform the call for
874 d90f0cb4 Helga Velroyen
  @type vg: string
875 d90f0cb4 Helga Velroyen
  @param vg: the volume group's name
876 d90f0cb4 Helga Velroyen

877 d90f0cb4 Helga Velroyen
  """
878 d90f0cb4 Helga Velroyen
  lvm_storage_units = [(constants.ST_LVM_VG, vg)]
879 d90f0cb4 Helga Velroyen
  storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
880 d90f0cb4 Helga Velroyen
                                                  node_uuids)
881 d90f0cb4 Helga Velroyen
  hvname = lu.cfg.GetHypervisorType()
882 d90f0cb4 Helga Velroyen
  hvparams = lu.cfg.GetClusterInfo().hvparams
883 d90f0cb4 Helga Velroyen
  nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
884 d90f0cb4 Helga Velroyen
                                   [(hvname, hvparams[hvname])])
885 d90f0cb4 Helga Velroyen
  return nodeinfo
886 d90f0cb4 Helga Velroyen
887 d90f0cb4 Helga Velroyen
888 d90f0cb4 Helga Velroyen
def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
889 d90f0cb4 Helga Velroyen
  """Checks the vg capacity for a given node.
890 d90f0cb4 Helga Velroyen

891 d90f0cb4 Helga Velroyen
  @type node_info: tuple (_, list of dicts, _)
892 d90f0cb4 Helga Velroyen
  @param node_info: the result of the node info call for one node
893 d90f0cb4 Helga Velroyen
  @type node_name: string
894 d90f0cb4 Helga Velroyen
  @param node_name: the name of the node
895 d90f0cb4 Helga Velroyen
  @type vg: string
896 d90f0cb4 Helga Velroyen
  @param vg: volume group name
897 d90f0cb4 Helga Velroyen
  @type requested: int
898 d90f0cb4 Helga Velroyen
  @param requested: the amount of disk in MiB to check for
899 d90f0cb4 Helga Velroyen
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
900 d90f0cb4 Helga Velroyen
      or we cannot check the node
901 d90f0cb4 Helga Velroyen

902 d90f0cb4 Helga Velroyen
  """
903 d90f0cb4 Helga Velroyen
  (_, space_info, _) = node_info
904 d90f0cb4 Helga Velroyen
  lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
905 d90f0cb4 Helga Velroyen
      space_info, constants.ST_LVM_VG)
906 d90f0cb4 Helga Velroyen
  if not lvm_vg_info:
907 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Can't retrieve storage information for LVM")
908 d90f0cb4 Helga Velroyen
  vg_free = lvm_vg_info.get("storage_free", None)
909 d90f0cb4 Helga Velroyen
  if not isinstance(vg_free, int):
910 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Can't compute free disk space on node"
911 d90f0cb4 Helga Velroyen
                               " %s for vg %s, result was '%s'" %
912 d90f0cb4 Helga Velroyen
                               (node_name, vg, vg_free), errors.ECODE_ENVIRON)
913 d90f0cb4 Helga Velroyen
  if requested > vg_free:
914 d90f0cb4 Helga Velroyen
    raise errors.OpPrereqError("Not enough disk space on target node %s"
915 d90f0cb4 Helga Velroyen
                               " vg %s: required %d MiB, available %d MiB" %
916 d90f0cb4 Helga Velroyen
                               (node_name, vg, requested, vg_free),
917 d90f0cb4 Helga Velroyen
                               errors.ECODE_NORES)
918 d90f0cb4 Helga Velroyen
919 d90f0cb4 Helga Velroyen
920 1c3231aa Thomas Thrainer
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
921 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in the specified VG.
922 763ad5be Thomas Thrainer

923 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
924 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
925 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
926 763ad5be Thomas Thrainer
  exception.
927 763ad5be Thomas Thrainer

928 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
929 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
930 1c3231aa Thomas Thrainer
  @type node_uuids: C{list}
931 1c3231aa Thomas Thrainer
  @param node_uuids: the list of node UUIDs to check
932 763ad5be Thomas Thrainer
  @type vg: C{str}
933 763ad5be Thomas Thrainer
  @param vg: the volume group to check
934 763ad5be Thomas Thrainer
  @type requested: C{int}
935 763ad5be Thomas Thrainer
  @param requested: the amount of disk in MiB to check for
936 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
937 763ad5be Thomas Thrainer
      or we cannot check the node
938 763ad5be Thomas Thrainer

939 763ad5be Thomas Thrainer
  """
940 d90f0cb4 Helga Velroyen
  nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
941 f667baab Thomas Thrainer
  for node_uuid in node_uuids:
942 f667baab Thomas Thrainer
    node_name = lu.cfg.GetNodeName(node_uuid)
943 f667baab Thomas Thrainer
    info = nodeinfo[node_uuid]
944 1c3231aa Thomas Thrainer
    info.Raise("Cannot get current information from node %s" % node_name,
945 763ad5be Thomas Thrainer
               prereq=True, ecode=errors.ECODE_ENVIRON)
946 d90f0cb4 Helga Velroyen
    _CheckVgCapacityForNode(node_name, info.payload, vg, requested)
947 763ad5be Thomas Thrainer
948 763ad5be Thomas Thrainer
949 1c3231aa Thomas Thrainer
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
950 763ad5be Thomas Thrainer
  """Checks if nodes have enough free disk space in all the VGs.
951 763ad5be Thomas Thrainer

952 763ad5be Thomas Thrainer
  This function checks if all given nodes have the needed amount of
953 763ad5be Thomas Thrainer
  free disk. In case any node has less disk or we cannot get the
954 763ad5be Thomas Thrainer
  information from the node, this function raises an OpPrereqError
955 763ad5be Thomas Thrainer
  exception.
956 763ad5be Thomas Thrainer

957 763ad5be Thomas Thrainer
  @type lu: C{LogicalUnit}
958 763ad5be Thomas Thrainer
  @param lu: a logical unit from which we get configuration data
959 1c3231aa Thomas Thrainer
  @type node_uuids: C{list}
960 1c3231aa Thomas Thrainer
  @param node_uuids: the list of node UUIDs to check
961 763ad5be Thomas Thrainer
  @type req_sizes: C{dict}
962 763ad5be Thomas Thrainer
  @param req_sizes: a dict mapping volume group names to the amount of
963 763ad5be Thomas Thrainer
      disk in MiB to check for
964 763ad5be Thomas Thrainer
  @raise errors.OpPrereqError: if the node doesn't have enough disk,
965 763ad5be Thomas Thrainer
      or we cannot check the node
966 763ad5be Thomas Thrainer

967 763ad5be Thomas Thrainer
  """
968 763ad5be Thomas Thrainer
  for vg, req_size in req_sizes.items():
969 1c3231aa Thomas Thrainer
    _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, req_size)
970 763ad5be Thomas Thrainer
971 763ad5be Thomas Thrainer
972 763ad5be Thomas Thrainer
def _DiskSizeInBytesToMebibytes(lu, size):
973 763ad5be Thomas Thrainer
  """Converts a disk size in bytes to mebibytes.
974 763ad5be Thomas Thrainer

975 763ad5be Thomas Thrainer
  Warns and rounds up if the size isn't an even multiple of 1 MiB.
976 763ad5be Thomas Thrainer

977 763ad5be Thomas Thrainer
  """
978 763ad5be Thomas Thrainer
  (mib, remainder) = divmod(size, 1024 * 1024)
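  # Worked example (illustrative): size = 1073741825 (1 GiB + 1 byte) gives
  # mib = 1024 and remainder = 1, so the result is rounded up to 1025 MiB
  # and a warning is emitted.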
979 763ad5be Thomas Thrainer
980 763ad5be Thomas Thrainer
  if remainder != 0:
981 763ad5be Thomas Thrainer
    lu.LogWarning("Disk size is not an even multiple of 1 MiB; rounding up"
982 763ad5be Thomas Thrainer
                  " to not overwrite existing data (%s bytes will not be"
983 763ad5be Thomas Thrainer
                  " wiped)", (1024 * 1024) - remainder)
984 763ad5be Thomas Thrainer
    mib += 1
985 763ad5be Thomas Thrainer
986 763ad5be Thomas Thrainer
  return mib
987 763ad5be Thomas Thrainer
988 763ad5be Thomas Thrainer
989 763ad5be Thomas Thrainer
def _CalcEta(time_taken, written, total_size):
990 763ad5be Thomas Thrainer
  """Calculates the ETA based on size written and total size.
991 763ad5be Thomas Thrainer

992 763ad5be Thomas Thrainer
  @param time_taken: The time taken so far
993 763ad5be Thomas Thrainer
  @param written: amount written so far
994 763ad5be Thomas Thrainer
  @param total_size: The total size of data to be written
995 763ad5be Thomas Thrainer
  @return: The remaining time in seconds
996 763ad5be Thomas Thrainer

997 763ad5be Thomas Thrainer
  """
998 763ad5be Thomas Thrainer
  avg_time = time_taken / float(written)
999 763ad5be Thomas Thrainer
  return (total_size - written) * avg_time
1000 763ad5be Thomas Thrainer
1001 763ad5be Thomas Thrainer
1002 5eacbcae Thomas Thrainer
def WipeDisks(lu, instance, disks=None):
1003 763ad5be Thomas Thrainer
  """Wipes instance disks.
1004 763ad5be Thomas Thrainer

1005 763ad5be Thomas Thrainer
  @type lu: L{LogicalUnit}
1006 763ad5be Thomas Thrainer
  @param lu: the logical unit on whose behalf we execute
1007 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
1008 763ad5be Thomas Thrainer
  @param instance: the instance whose disks we should wipe
1009 763ad5be Thomas Thrainer
  @type disks: None or list of tuple of (number, L{objects.Disk}, number)
1010 763ad5be Thomas Thrainer
  @param disks: Disk details; tuple contains disk index, disk object and the
1011 763ad5be Thomas Thrainer
    start offset
1012 763ad5be Thomas Thrainer

1013 763ad5be Thomas Thrainer
  """
1014 1c3231aa Thomas Thrainer
  node_uuid = instance.primary_node
1015 1c3231aa Thomas Thrainer
  node_name = lu.cfg.GetNodeName(node_uuid)
1016 763ad5be Thomas Thrainer
1017 763ad5be Thomas Thrainer
  if disks is None:
1018 763ad5be Thomas Thrainer
    disks = [(idx, disk, 0)
1019 763ad5be Thomas Thrainer
             for (idx, disk) in enumerate(instance.disks)]
1020 763ad5be Thomas Thrainer
1021 763ad5be Thomas Thrainer
  logging.info("Pausing synchronization of disks of instance '%s'",
1022 763ad5be Thomas Thrainer
               instance.name)
1023 1c3231aa Thomas Thrainer
  result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
1024 763ad5be Thomas Thrainer
                                                  (map(compat.snd, disks),
1025 763ad5be Thomas Thrainer
                                                   instance),
1026 763ad5be Thomas Thrainer
                                                  True)
1027 1c3231aa Thomas Thrainer
  result.Raise("Failed to pause disk synchronization on node '%s'" % node_name)
1028 763ad5be Thomas Thrainer
1029 763ad5be Thomas Thrainer
  for idx, success in enumerate(result.payload):
1030 763ad5be Thomas Thrainer
    if not success:
1031 763ad5be Thomas Thrainer
      logging.warn("Pausing synchronization of disk %s of instance '%s'"
1032 763ad5be Thomas Thrainer
                   " failed", idx, instance.name)
1033 763ad5be Thomas Thrainer
1034 763ad5be Thomas Thrainer
  try:
1035 763ad5be Thomas Thrainer
    for (idx, device, offset) in disks:
1036 763ad5be Thomas Thrainer
      # The wipe size is MIN_WIPE_CHUNK_PERCENT % of the instance disk but
1037 763ad5be Thomas Thrainer
      # MAX_WIPE_CHUNK at max. Truncating to integer to avoid rounding errors.
1038 763ad5be Thomas Thrainer
      wipe_chunk_size = \
1039 763ad5be Thomas Thrainer
        int(min(constants.MAX_WIPE_CHUNK,
1040 763ad5be Thomas Thrainer
                device.size / 100.0 * constants.MIN_WIPE_CHUNK_PERCENT))
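      # Illustrative example (assuming MIN_WIPE_CHUNK_PERCENT = 10 and
      # MAX_WIPE_CHUNK = 1024 MiB; see constants.py for the real values): a
      # 100 GiB disk would yield a raw chunk of 10 GiB, capped to 1024 MiB.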
1041 763ad5be Thomas Thrainer
1042 763ad5be Thomas Thrainer
      size = device.size
1043 763ad5be Thomas Thrainer
      last_output = 0
1044 763ad5be Thomas Thrainer
      start_time = time.time()
1045 763ad5be Thomas Thrainer
1046 763ad5be Thomas Thrainer
      if offset == 0:
1047 763ad5be Thomas Thrainer
        info_text = ""
1048 763ad5be Thomas Thrainer
      else:
1049 763ad5be Thomas Thrainer
        info_text = (" (from %s to %s)" %
1050 763ad5be Thomas Thrainer
                     (utils.FormatUnit(offset, "h"),
1051 763ad5be Thomas Thrainer
                      utils.FormatUnit(size, "h")))
1052 763ad5be Thomas Thrainer
1053 763ad5be Thomas Thrainer
      lu.LogInfo("* Wiping disk %s%s", idx, info_text)
1054 763ad5be Thomas Thrainer
1055 763ad5be Thomas Thrainer
      logging.info("Wiping disk %d for instance %s on node %s using"
1056 1c3231aa Thomas Thrainer
                   " chunk size %s", idx, instance.name, node_name,
1057 1c3231aa Thomas Thrainer
                   wipe_chunk_size)
1058 763ad5be Thomas Thrainer
1059 763ad5be Thomas Thrainer
      while offset < size:
1060 763ad5be Thomas Thrainer
        wipe_size = min(wipe_chunk_size, size - offset)
1061 763ad5be Thomas Thrainer
1062 763ad5be Thomas Thrainer
        logging.debug("Wiping disk %d, offset %s, chunk %s",
1063 763ad5be Thomas Thrainer
                      idx, offset, wipe_size)
1064 763ad5be Thomas Thrainer
1065 1c3231aa Thomas Thrainer
        result = lu.rpc.call_blockdev_wipe(node_uuid, (device, instance),
1066 1c3231aa Thomas Thrainer
                                           offset, wipe_size)
1067 763ad5be Thomas Thrainer
        result.Raise("Could not wipe disk %d at offset %d for size %d" %
1068 763ad5be Thomas Thrainer
                     (idx, offset, wipe_size))
1069 763ad5be Thomas Thrainer
1070 763ad5be Thomas Thrainer
        now = time.time()
1071 763ad5be Thomas Thrainer
        offset += wipe_size
1072 763ad5be Thomas Thrainer
        if now - last_output >= 60:
1073 763ad5be Thomas Thrainer
          eta = _CalcEta(now - start_time, offset, size)
1074 763ad5be Thomas Thrainer
          lu.LogInfo(" - done: %.1f%% ETA: %s",
1075 763ad5be Thomas Thrainer
                     offset / float(size) * 100, utils.FormatSeconds(eta))
1076 763ad5be Thomas Thrainer
          last_output = now
1077 763ad5be Thomas Thrainer
  finally:
1078 763ad5be Thomas Thrainer
    logging.info("Resuming synchronization of disks for instance '%s'",
1079 763ad5be Thomas Thrainer
                 instance.name)
1080 763ad5be Thomas Thrainer
1081 1c3231aa Thomas Thrainer
    result = lu.rpc.call_blockdev_pause_resume_sync(node_uuid,
1082 763ad5be Thomas Thrainer
                                                    (map(compat.snd, disks),
1083 763ad5be Thomas Thrainer
                                                     instance),
1084 763ad5be Thomas Thrainer
                                                    False)
1085 763ad5be Thomas Thrainer
1086 763ad5be Thomas Thrainer
    if result.fail_msg:
1087 763ad5be Thomas Thrainer
      lu.LogWarning("Failed to resume disk synchronization on node '%s': %s",
1088 1c3231aa Thomas Thrainer
                    node_name, result.fail_msg)
1089 763ad5be Thomas Thrainer
    else:
1090 763ad5be Thomas Thrainer
      for idx, success in enumerate(result.payload):
1091 763ad5be Thomas Thrainer
        if not success:
1092 763ad5be Thomas Thrainer
          lu.LogWarning("Resuming synchronization of disk %s of instance '%s'"
1093 763ad5be Thomas Thrainer
                        " failed", idx, instance.name)
1094 763ad5be Thomas Thrainer
1095 763ad5be Thomas Thrainer
1096 a365b47f Bernardo Dal Seno
def WipeOrCleanupDisks(lu, instance, disks=None, cleanup=None):
1097 a365b47f Bernardo Dal Seno
  """Wrapper for L{WipeDisks} that handles errors.
1098 a365b47f Bernardo Dal Seno

1099 a365b47f Bernardo Dal Seno
  @type lu: L{LogicalUnit}
1100 a365b47f Bernardo Dal Seno
  @param lu: the logical unit on whose behalf we execute
1101 a365b47f Bernardo Dal Seno
  @type instance: L{objects.Instance}
1102 a365b47f Bernardo Dal Seno
  @param instance: the instance whose disks we should wipe
1103 a365b47f Bernardo Dal Seno
  @param disks: see L{WipeDisks}
1104 a365b47f Bernardo Dal Seno
  @param cleanup: the result returned by L{CreateDisks}, used for cleanup in
1105 a365b47f Bernardo Dal Seno
      case of error
1106 a365b47f Bernardo Dal Seno
  @raise errors.OpExecError: in case of failure
1107 a365b47f Bernardo Dal Seno

1108 a365b47f Bernardo Dal Seno
  """
1109 a365b47f Bernardo Dal Seno
  try:
1110 a365b47f Bernardo Dal Seno
    WipeDisks(lu, instance, disks=disks)
1111 a365b47f Bernardo Dal Seno
  except errors.OpExecError:
1112 a365b47f Bernardo Dal Seno
    logging.warning("Wiping disks for instance '%s' failed",
1113 a365b47f Bernardo Dal Seno
                    instance.name)
1114 0c3d9c7c Thomas Thrainer
    _UndoCreateDisks(lu, cleanup, instance)
1115 a365b47f Bernardo Dal Seno
    raise
1116 a365b47f Bernardo Dal Seno
1117 a365b47f Bernardo Dal Seno
1118 5eacbcae Thomas Thrainer
def ExpandCheckDisks(instance, disks):
1119 763ad5be Thomas Thrainer
  """Return the instance disks selected by the disks list
1120 763ad5be Thomas Thrainer

1121 763ad5be Thomas Thrainer
  @type disks: list of L{objects.Disk} or None
1122 763ad5be Thomas Thrainer
  @param disks: selected disks
1123 763ad5be Thomas Thrainer
  @rtype: list of L{objects.Disk}
1124 763ad5be Thomas Thrainer
  @return: selected instance disks to act on
1125 763ad5be Thomas Thrainer

1126 763ad5be Thomas Thrainer
  """
1127 763ad5be Thomas Thrainer
  if disks is None:
1128 763ad5be Thomas Thrainer
    return instance.disks
1129 763ad5be Thomas Thrainer
  else:
1130 763ad5be Thomas Thrainer
    if not set(disks).issubset(instance.disks):
1131 763ad5be Thomas Thrainer
      raise errors.ProgrammerError("Can only act on disks belonging to the"
1132 328201a5 Guido Trotter
                                   " target instance: expected a subset of %r,"
1133 328201a5 Guido Trotter
                                   " got %r" % (instance.disks, disks))
1134 763ad5be Thomas Thrainer
    return disks
1135 763ad5be Thomas Thrainer
1136 763ad5be Thomas Thrainer
1137 5eacbcae Thomas Thrainer
def WaitForSync(lu, instance, disks=None, oneshot=False):
1138 763ad5be Thomas Thrainer
  """Sleep and poll for an instance's disk to sync.
1139 763ad5be Thomas Thrainer

1140 763ad5be Thomas Thrainer
  """
1141 763ad5be Thomas Thrainer
  if not instance.disks or disks is not None and not disks:
1142 763ad5be Thomas Thrainer
    return True
1143 763ad5be Thomas Thrainer
1144 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1145 763ad5be Thomas Thrainer
1146 763ad5be Thomas Thrainer
  if not oneshot:
1147 763ad5be Thomas Thrainer
    lu.LogInfo("Waiting for instance %s to sync disks", instance.name)
1148 763ad5be Thomas Thrainer
1149 1c3231aa Thomas Thrainer
  node_uuid = instance.primary_node
1150 1c3231aa Thomas Thrainer
  node_name = lu.cfg.GetNodeName(node_uuid)
1151 763ad5be Thomas Thrainer
1152 763ad5be Thomas Thrainer
  # TODO: Convert to utils.Retry
1153 763ad5be Thomas Thrainer
1154 763ad5be Thomas Thrainer
  retries = 0
1155 763ad5be Thomas Thrainer
  degr_retries = 10 # in seconds, as we sleep 1 second each time
1156 763ad5be Thomas Thrainer
  while True:
1157 763ad5be Thomas Thrainer
    max_time = 0
1158 763ad5be Thomas Thrainer
    done = True
1159 763ad5be Thomas Thrainer
    cumul_degraded = False
1160 1c3231aa Thomas Thrainer
    rstats = lu.rpc.call_blockdev_getmirrorstatus(node_uuid, (disks, instance))
1161 763ad5be Thomas Thrainer
    msg = rstats.fail_msg
1162 763ad5be Thomas Thrainer
    if msg:
1163 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't get any data from node %s: %s", node_name, msg)
1164 763ad5be Thomas Thrainer
      retries += 1
1165 763ad5be Thomas Thrainer
      if retries >= 10:
1166 763ad5be Thomas Thrainer
        raise errors.RemoteError("Can't contact node %s for mirror data,"
1167 1c3231aa Thomas Thrainer
                                 " aborting." % node_name)
1168 763ad5be Thomas Thrainer
      time.sleep(6)
1169 763ad5be Thomas Thrainer
      continue
1170 763ad5be Thomas Thrainer
    rstats = rstats.payload
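    # Each payload entry is a block device mirror status object; only its
    # is_degraded, sync_percent and estimated_time fields are used below.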
1171 763ad5be Thomas Thrainer
    retries = 0
1172 763ad5be Thomas Thrainer
    for i, mstat in enumerate(rstats):
1173 763ad5be Thomas Thrainer
      if mstat is None:
1174 763ad5be Thomas Thrainer
        lu.LogWarning("Can't compute data for node %s/%s",
1175 1c3231aa Thomas Thrainer
                      node_name, disks[i].iv_name)
1176 763ad5be Thomas Thrainer
        continue
1177 763ad5be Thomas Thrainer
1178 763ad5be Thomas Thrainer
      cumul_degraded = (cumul_degraded or
1179 763ad5be Thomas Thrainer
                        (mstat.is_degraded and mstat.sync_percent is None))
1180 763ad5be Thomas Thrainer
      if mstat.sync_percent is not None:
1181 763ad5be Thomas Thrainer
        done = False
1182 763ad5be Thomas Thrainer
        if mstat.estimated_time is not None:
1183 763ad5be Thomas Thrainer
          rem_time = ("%s remaining (estimated)" %
1184 763ad5be Thomas Thrainer
                      utils.FormatSeconds(mstat.estimated_time))
1185 763ad5be Thomas Thrainer
          max_time = mstat.estimated_time
1186 763ad5be Thomas Thrainer
        else:
1187 763ad5be Thomas Thrainer
          rem_time = "no time estimate"
1188 763ad5be Thomas Thrainer
        lu.LogInfo("- device %s: %5.2f%% done, %s",
1189 763ad5be Thomas Thrainer
                   disks[i].iv_name, mstat.sync_percent, rem_time)
1190 763ad5be Thomas Thrainer
1191 763ad5be Thomas Thrainer
    # if we're done but degraded, let's do a few small retries, to
1192 763ad5be Thomas Thrainer
    # make sure we see a stable and not transient situation; therefore
1193 763ad5be Thomas Thrainer
    # we force restart of the loop
1194 763ad5be Thomas Thrainer
    if (done or oneshot) and cumul_degraded and degr_retries > 0:
1195 763ad5be Thomas Thrainer
      logging.info("Degraded disks found, %d retries left", degr_retries)
1196 763ad5be Thomas Thrainer
      degr_retries -= 1
1197 763ad5be Thomas Thrainer
      time.sleep(1)
1198 763ad5be Thomas Thrainer
      continue
1199 763ad5be Thomas Thrainer
1200 763ad5be Thomas Thrainer
    if done or oneshot:
1201 763ad5be Thomas Thrainer
      break
1202 763ad5be Thomas Thrainer
1203 763ad5be Thomas Thrainer
    time.sleep(min(60, max_time))
1204 763ad5be Thomas Thrainer
1205 763ad5be Thomas Thrainer
  if done:
1206 763ad5be Thomas Thrainer
    lu.LogInfo("Instance %s's disks are in sync", instance.name)
1207 763ad5be Thomas Thrainer
1208 763ad5be Thomas Thrainer
  return not cumul_degraded
1209 763ad5be Thomas Thrainer
1210 763ad5be Thomas Thrainer
1211 5eacbcae Thomas Thrainer
def ShutdownInstanceDisks(lu, instance, disks=None, ignore_primary=False):
1212 763ad5be Thomas Thrainer
  """Shutdown block devices of an instance.
1213 763ad5be Thomas Thrainer

1214 763ad5be Thomas Thrainer
  This does the shutdown on all nodes of the instance.
1215 763ad5be Thomas Thrainer

1216 763ad5be Thomas Thrainer
  If the ignore_primary is false, errors on the primary node are
1217 763ad5be Thomas Thrainer
  ignored.
1218 763ad5be Thomas Thrainer

1219 763ad5be Thomas Thrainer
  """
1220 763ad5be Thomas Thrainer
  all_result = True
1221 ba924970 Dimitris Aragiorgis
1222 ba924970 Dimitris Aragiorgis
  if disks is None:
1223 ba924970 Dimitris Aragiorgis
    # only mark instance disks as inactive if all disks are affected
1224 ba924970 Dimitris Aragiorgis
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)
1225 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1226 763ad5be Thomas Thrainer
1227 763ad5be Thomas Thrainer
  for disk in disks:
1228 1c3231aa Thomas Thrainer
    for node_uuid, top_disk in disk.ComputeNodeTree(instance.primary_node):
1229 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_shutdown(node_uuid, (top_disk, instance))
1230 763ad5be Thomas Thrainer
      msg = result.fail_msg
1231 763ad5be Thomas Thrainer
      if msg:
1232 763ad5be Thomas Thrainer
        lu.LogWarning("Could not shutdown block device %s on node %s: %s",
1233 1c3231aa Thomas Thrainer
                      disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1234 1c3231aa Thomas Thrainer
        if ((node_uuid == instance.primary_node and not ignore_primary) or
1235 1c3231aa Thomas Thrainer
            (node_uuid != instance.primary_node and not result.offline)):
1236 763ad5be Thomas Thrainer
          all_result = False
1237 763ad5be Thomas Thrainer
  return all_result
1238 763ad5be Thomas Thrainer
1239 763ad5be Thomas Thrainer
1240 763ad5be Thomas Thrainer
def _SafeShutdownInstanceDisks(lu, instance, disks=None):
1241 763ad5be Thomas Thrainer
  """Shutdown block devices of an instance.
1242 763ad5be Thomas Thrainer

1243 763ad5be Thomas Thrainer
  This function checks that the instance is not running before calling
1244 763ad5be Thomas Thrainer
  ShutdownInstanceDisks.
1245 763ad5be Thomas Thrainer

1246 763ad5be Thomas Thrainer
  """
1247 5eacbcae Thomas Thrainer
  CheckInstanceState(lu, instance, INSTANCE_DOWN, msg="cannot shutdown disks")
1248 5eacbcae Thomas Thrainer
  ShutdownInstanceDisks(lu, instance, disks=disks)
1249 763ad5be Thomas Thrainer
1250 763ad5be Thomas Thrainer
1251 5eacbcae Thomas Thrainer
def AssembleInstanceDisks(lu, instance, disks=None, ignore_secondaries=False,
1252 ba924970 Dimitris Aragiorgis
                          ignore_size=False):
1253 763ad5be Thomas Thrainer
  """Prepare the block devices for an instance.
1254 763ad5be Thomas Thrainer

1255 763ad5be Thomas Thrainer
  This sets up the block devices on all nodes.
1256 763ad5be Thomas Thrainer

1257 763ad5be Thomas Thrainer
  @type lu: L{LogicalUnit}
1258 763ad5be Thomas Thrainer
  @param lu: the logical unit on whose behalf we execute
1259 763ad5be Thomas Thrainer
  @type instance: L{objects.Instance}
1260 763ad5be Thomas Thrainer
  @param instance: the instance for whose disks we assemble
1261 763ad5be Thomas Thrainer
  @type disks: list of L{objects.Disk} or None
1262 763ad5be Thomas Thrainer
  @param disks: which disks to assemble (or all, if None)
1263 763ad5be Thomas Thrainer
  @type ignore_secondaries: boolean
1264 763ad5be Thomas Thrainer
  @param ignore_secondaries: if true, errors on secondary nodes
1265 763ad5be Thomas Thrainer
      won't result in an error return from the function
1266 763ad5be Thomas Thrainer
  @type ignore_size: boolean
1267 763ad5be Thomas Thrainer
  @param ignore_size: if true, the current known size of the disk
1268 763ad5be Thomas Thrainer
      will not be used during the disk activation, useful for cases
1269 763ad5be Thomas Thrainer
      when the size is wrong
1270 763ad5be Thomas Thrainer
  @return: tuple of (disks_ok, device_info); device_info is a list of
1271 763ad5be Thomas Thrainer
      (host, instance_visible_name, node_visible_name) tuples with the
1272 763ad5be Thomas Thrainer
      mapping from node devices to instance devices
1273 763ad5be Thomas Thrainer

1274 763ad5be Thomas Thrainer
  """
1275 763ad5be Thomas Thrainer
  device_info = []
1276 763ad5be Thomas Thrainer
  disks_ok = True
1277 ba924970 Dimitris Aragiorgis
1278 ba924970 Dimitris Aragiorgis
  if disks is None:
1279 ba924970 Dimitris Aragiorgis
    # only mark instance disks as active if all disks are affected
1280 ba924970 Dimitris Aragiorgis
    lu.cfg.MarkInstanceDisksActive(instance.uuid)
1281 ba924970 Dimitris Aragiorgis
1282 5eacbcae Thomas Thrainer
  disks = ExpandCheckDisks(instance, disks)
1283 763ad5be Thomas Thrainer
1284 763ad5be Thomas Thrainer
  # With the two passes mechanism we try to reduce the window of
1285 763ad5be Thomas Thrainer
  # opportunity for the race condition of switching DRBD to primary
1286 763ad5be Thomas Thrainer
  # before handshaking occurred, but we do not eliminate it
1287 763ad5be Thomas Thrainer
1288 763ad5be Thomas Thrainer
  # The proper fix would be to wait (with some limits) until the
1289 763ad5be Thomas Thrainer
  # connection has been made and drbd transitions from WFConnection
1290 763ad5be Thomas Thrainer
  # into any other network-connected state (Connected, SyncTarget,
1291 763ad5be Thomas Thrainer
  # SyncSource, etc.)
1292 763ad5be Thomas Thrainer
1293 763ad5be Thomas Thrainer
  # 1st pass, assemble on all nodes in secondary mode
1294 763ad5be Thomas Thrainer
  for idx, inst_disk in enumerate(disks):
1295 1c3231aa Thomas Thrainer
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
1296 1c3231aa Thomas Thrainer
                                  instance.primary_node):
1297 763ad5be Thomas Thrainer
      if ignore_size:
1298 763ad5be Thomas Thrainer
        node_disk = node_disk.Copy()
1299 763ad5be Thomas Thrainer
        node_disk.UnsetSize()
1300 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
1301 da4a52a3 Thomas Thrainer
                                             instance.name, False, idx)
1302 763ad5be Thomas Thrainer
      msg = result.fail_msg
1303 763ad5be Thomas Thrainer
      if msg:
1304 1c3231aa Thomas Thrainer
        is_offline_secondary = (node_uuid in instance.secondary_nodes and
1305 763ad5be Thomas Thrainer
                                result.offline)
1306 763ad5be Thomas Thrainer
        lu.LogWarning("Could not prepare block device %s on node %s"
1307 763ad5be Thomas Thrainer
                      " (is_primary=False, pass=1): %s",
1308 1c3231aa Thomas Thrainer
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1309 763ad5be Thomas Thrainer
        if not (ignore_secondaries or is_offline_secondary):
1310 763ad5be Thomas Thrainer
          disks_ok = False
1311 763ad5be Thomas Thrainer
1312 763ad5be Thomas Thrainer
  # FIXME: race condition on drbd migration to primary
1313 763ad5be Thomas Thrainer
1314 763ad5be Thomas Thrainer
  # 2nd pass, do only the primary node
1315 763ad5be Thomas Thrainer
  for idx, inst_disk in enumerate(disks):
1316 763ad5be Thomas Thrainer
    dev_path = None
1317 763ad5be Thomas Thrainer
1318 1c3231aa Thomas Thrainer
    for node_uuid, node_disk in inst_disk.ComputeNodeTree(
1319 1c3231aa Thomas Thrainer
                                  instance.primary_node):
1320 1c3231aa Thomas Thrainer
      if node_uuid != instance.primary_node:
1321 763ad5be Thomas Thrainer
        continue
1322 763ad5be Thomas Thrainer
      if ignore_size:
1323 763ad5be Thomas Thrainer
        node_disk = node_disk.Copy()
1324 763ad5be Thomas Thrainer
        node_disk.UnsetSize()
1325 1c3231aa Thomas Thrainer
      result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
1326 da4a52a3 Thomas Thrainer
                                             instance.name, True, idx)
1327 763ad5be Thomas Thrainer
      msg = result.fail_msg
1328 763ad5be Thomas Thrainer
      if msg:
1329 763ad5be Thomas Thrainer
        lu.LogWarning("Could not prepare block device %s on node %s"
1330 763ad5be Thomas Thrainer
                      " (is_primary=True, pass=2): %s",
1331 1c3231aa Thomas Thrainer
                      inst_disk.iv_name, lu.cfg.GetNodeName(node_uuid), msg)
1332 763ad5be Thomas Thrainer
        disks_ok = False
1333 763ad5be Thomas Thrainer
      else:
1334 ba924970 Dimitris Aragiorgis
        dev_path, _ = result.payload
1335 763ad5be Thomas Thrainer
1336 1c3231aa Thomas Thrainer
    device_info.append((lu.cfg.GetNodeName(instance.primary_node),
1337 1c3231aa Thomas Thrainer
                        inst_disk.iv_name, dev_path))
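    # A device_info entry ends up looking like, e.g. (hypothetical values),
    # ("node1.example.com", "disk/0", "/dev/drbd0").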
1338 763ad5be Thomas Thrainer
1339 1d4a4b26 Thomas Thrainer
  if not disks_ok:
1340 da4a52a3 Thomas Thrainer
    lu.cfg.MarkInstanceDisksInactive(instance.uuid)
1341 1d4a4b26 Thomas Thrainer
1342 763ad5be Thomas Thrainer
  return disks_ok, device_info
1343 763ad5be Thomas Thrainer
1344 763ad5be Thomas Thrainer
1345 5eacbcae Thomas Thrainer
def StartInstanceDisks(lu, instance, force):
1346 763ad5be Thomas Thrainer
  """Start the disks of an instance.
1347 763ad5be Thomas Thrainer

1348 763ad5be Thomas Thrainer
  """
1349 5eacbcae Thomas Thrainer
  disks_ok, _ = AssembleInstanceDisks(lu, instance,
1350 5eacbcae Thomas Thrainer
                                      ignore_secondaries=force)
1351 763ad5be Thomas Thrainer
  if not disks_ok:
1352 5eacbcae Thomas Thrainer
    ShutdownInstanceDisks(lu, instance)
1353 763ad5be Thomas Thrainer
    if force is not None and not force:
1354 763ad5be Thomas Thrainer
      lu.LogWarning("",
1355 763ad5be Thomas Thrainer
                    hint=("If the message above refers to a secondary node,"
1356 763ad5be Thomas Thrainer
                          " you can retry the operation using '--force'"))
1357 763ad5be Thomas Thrainer
    raise errors.OpExecError("Disk consistency error")
1358 763ad5be Thomas Thrainer
1359 763ad5be Thomas Thrainer
1360 763ad5be Thomas Thrainer
class LUInstanceGrowDisk(LogicalUnit):
1361 763ad5be Thomas Thrainer
  """Grow a disk of an instance.
1362 763ad5be Thomas Thrainer

1363 763ad5be Thomas Thrainer
  """
1364 763ad5be Thomas Thrainer
  HPATH = "disk-grow"
1365 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
1366 763ad5be Thomas Thrainer
  REQ_BGL = False
1367 763ad5be Thomas Thrainer
1368 763ad5be Thomas Thrainer
  def ExpandNames(self):
1369 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1370 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1371 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1372 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1373 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
1374 763ad5be Thomas Thrainer
1375 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1376 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1377 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1378 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
1379 763ad5be Thomas Thrainer
      # Copy node locks
1380 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1381 5eacbcae Thomas Thrainer
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1382 763ad5be Thomas Thrainer
1383 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
1384 763ad5be Thomas Thrainer
    """Build hooks env.
1385 763ad5be Thomas Thrainer

1386 763ad5be Thomas Thrainer
    This runs on the master, the primary and all the secondaries.
1387 763ad5be Thomas Thrainer

1388 763ad5be Thomas Thrainer
    """
1389 763ad5be Thomas Thrainer
    env = {
1390 763ad5be Thomas Thrainer
      "DISK": self.op.disk,
1391 763ad5be Thomas Thrainer
      "AMOUNT": self.op.amount,
1392 763ad5be Thomas Thrainer
      "ABSOLUTE": self.op.absolute,
1393 763ad5be Thomas Thrainer
      }
1394 5eacbcae Thomas Thrainer
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
1395 763ad5be Thomas Thrainer
    return env
1396 763ad5be Thomas Thrainer
1397 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
1398 763ad5be Thomas Thrainer
    """Build hooks nodes.
1399 763ad5be Thomas Thrainer

1400 763ad5be Thomas Thrainer
    """
1401 763ad5be Thomas Thrainer
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
1402 763ad5be Thomas Thrainer
    return (nl, nl)
1403 763ad5be Thomas Thrainer
1404 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1405 763ad5be Thomas Thrainer
    """Check prerequisites.
1406 763ad5be Thomas Thrainer

1407 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1408 763ad5be Thomas Thrainer

1409 763ad5be Thomas Thrainer
    """
1410 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1411 da4a52a3 Thomas Thrainer
    assert self.instance is not None, \
1412 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1413 da4a52a3 Thomas Thrainer
    node_uuids = list(self.instance.all_nodes)
1414 1c3231aa Thomas Thrainer
    for node_uuid in node_uuids:
1415 1c3231aa Thomas Thrainer
      CheckNodeOnline(self, node_uuid)
1416 e43a624e Bernardo Dal Seno
    self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
1417 763ad5be Thomas Thrainer
1418 da4a52a3 Thomas Thrainer
    if self.instance.disk_template not in constants.DTS_GROWABLE:
1419 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Instance's disk layout does not support"
1420 763ad5be Thomas Thrainer
                                 " growing", errors.ECODE_INVAL)
1421 763ad5be Thomas Thrainer
1422 da4a52a3 Thomas Thrainer
    self.disk = self.instance.FindDisk(self.op.disk)
1423 763ad5be Thomas Thrainer
1424 763ad5be Thomas Thrainer
    if self.op.absolute:
1425 763ad5be Thomas Thrainer
      self.target = self.op.amount
1426 763ad5be Thomas Thrainer
      self.delta = self.target - self.disk.size
1427 763ad5be Thomas Thrainer
      if self.delta < 0:
1428 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Requested size (%s) is smaller than "
1429 763ad5be Thomas Thrainer
                                   "current disk size (%s)" %
1430 763ad5be Thomas Thrainer
                                   (utils.FormatUnit(self.target, "h"),
1431 763ad5be Thomas Thrainer
                                    utils.FormatUnit(self.disk.size, "h")),
1432 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
1433 763ad5be Thomas Thrainer
    else:
1434 763ad5be Thomas Thrainer
      self.delta = self.op.amount
1435 763ad5be Thomas Thrainer
      self.target = self.disk.size + self.delta
1436 763ad5be Thomas Thrainer
      if self.delta < 0:
1437 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Requested increment (%s) is negative" %
1438 763ad5be Thomas Thrainer
                                   utils.FormatUnit(self.delta, "h"),
1439 763ad5be Thomas Thrainer
                                   errors.ECODE_INVAL)
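    # Illustrative example (hypothetical sizes): for a 10240 MiB disk,
    # amount=2048 with absolute=False gives delta=2048 and target=12288,
    # while amount=20480 with absolute=True gives target=20480, delta=10240.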
1440 763ad5be Thomas Thrainer
1441 1c3231aa Thomas Thrainer
    self._CheckDiskSpace(node_uuids, self.disk.ComputeGrowth(self.delta))
1442 763ad5be Thomas Thrainer
1443 1c3231aa Thomas Thrainer
  def _CheckDiskSpace(self, node_uuids, req_vgspace):
1444 763ad5be Thomas Thrainer
    template = self.instance.disk_template
1445 8e5a911a Bernardo Dal Seno
    if (template not in (constants.DTS_NO_FREE_SPACE_CHECK) and
1446 8e5a911a Bernardo Dal Seno
        not any(self.node_es_flags.values())):
1447 763ad5be Thomas Thrainer
      # TODO: check the free disk space for file-based disks, once that
1448 763ad5be Thomas Thrainer
      # feature is supported
1449 8e5a911a Bernardo Dal Seno
      # With exclusive storage we need to do something smarter than just looking
1450 8e5a911a Bernardo Dal Seno
      # at free space, which, in the end, is basically a dry run. So we rely on
1451 8e5a911a Bernardo Dal Seno
      # the dry run performed in Exec() instead.
1452 1c3231aa Thomas Thrainer
      CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
1453 763ad5be Thomas Thrainer
1454 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1455 763ad5be Thomas Thrainer
    """Execute disk grow.
1456 763ad5be Thomas Thrainer

1457 763ad5be Thomas Thrainer
    """
1458 d0d7d7cf Thomas Thrainer
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
1459 763ad5be Thomas Thrainer
    assert (self.owned_locks(locking.LEVEL_NODE) ==
1460 763ad5be Thomas Thrainer
            self.owned_locks(locking.LEVEL_NODE_RES))
1461 763ad5be Thomas Thrainer
1462 763ad5be Thomas Thrainer
    wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
1463 763ad5be Thomas Thrainer
1464 d0d7d7cf Thomas Thrainer
    disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
1465 763ad5be Thomas Thrainer
    if not disks_ok:
1466 763ad5be Thomas Thrainer
      raise errors.OpExecError("Cannot activate block device to grow")
1467 763ad5be Thomas Thrainer
1468 763ad5be Thomas Thrainer
    feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
1469 d0d7d7cf Thomas Thrainer
                (self.op.disk, self.instance.name,
1470 763ad5be Thomas Thrainer
                 utils.FormatUnit(self.delta, "h"),
1471 763ad5be Thomas Thrainer
                 utils.FormatUnit(self.target, "h")))
1472 763ad5be Thomas Thrainer
1473 763ad5be Thomas Thrainer
    # First run all grow ops in dry-run mode
1474 d0d7d7cf Thomas Thrainer
    for node_uuid in self.instance.all_nodes:
1475 d0d7d7cf Thomas Thrainer
      result = self.rpc.call_blockdev_grow(node_uuid,
1476 d0d7d7cf Thomas Thrainer
                                           (self.disk, self.instance),
1477 e43a624e Bernardo Dal Seno
                                           self.delta, True, True,
1478 e43a624e Bernardo Dal Seno
                                           self.node_es_flags[node_uuid])
1479 1c3231aa Thomas Thrainer
      result.Raise("Dry-run grow request failed to node %s" %
1480 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(node_uuid))
1481 763ad5be Thomas Thrainer
1482 763ad5be Thomas Thrainer
    if wipe_disks:
1483 763ad5be Thomas Thrainer
      # Get disk size from primary node for wiping
1484 0c3d9c7c Thomas Thrainer
      result = self.rpc.call_blockdev_getdimensions(
1485 d66acf3d Thomas Thrainer
                 self.instance.primary_node, [([self.disk], self.instance)])
1486 763ad5be Thomas Thrainer
      result.Raise("Failed to retrieve disk size from node '%s'" %
1487 d0d7d7cf Thomas Thrainer
                   self.instance.primary_node)
1488 763ad5be Thomas Thrainer
1489 6ef8077e Bernardo Dal Seno
      (disk_dimensions, ) = result.payload
1490 763ad5be Thomas Thrainer
1491 6ef8077e Bernardo Dal Seno
      if disk_dimensions is None:
1492 763ad5be Thomas Thrainer
        raise errors.OpExecError("Failed to retrieve disk size from primary"
1493 d0d7d7cf Thomas Thrainer
                                 " node '%s'" % self.instance.primary_node)
1494 6ef8077e Bernardo Dal Seno
      (disk_size_in_bytes, _) = disk_dimensions
1495 763ad5be Thomas Thrainer
1496 763ad5be Thomas Thrainer
      old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)
1497 763ad5be Thomas Thrainer
1498 d0d7d7cf Thomas Thrainer
      assert old_disk_size >= self.disk.size, \
1499 763ad5be Thomas Thrainer
        ("Retrieved disk size too small (got %s, should be at least %s)" %
1500 d0d7d7cf Thomas Thrainer
         (old_disk_size, self.disk.size))
1501 763ad5be Thomas Thrainer
    else:
1502 763ad5be Thomas Thrainer
      old_disk_size = None
1503 763ad5be Thomas Thrainer
1504 763ad5be Thomas Thrainer
    # We know that (as far as we can test) operations across different
1505 763ad5be Thomas Thrainer
    # nodes will succeed, time to run it for real on the backing storage
1506 d0d7d7cf Thomas Thrainer
    for node_uuid in self.instance.all_nodes:
1507 d0d7d7cf Thomas Thrainer
      result = self.rpc.call_blockdev_grow(node_uuid,
1508 d0d7d7cf Thomas Thrainer
                                           (self.disk, self.instance),
1509 e43a624e Bernardo Dal Seno
                                           self.delta, False, True,
1510 e43a624e Bernardo Dal Seno
                                           self.node_es_flags[node_uuid])
1511 1c3231aa Thomas Thrainer
      result.Raise("Grow request failed to node %s" %
1512 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(node_uuid))
1513 763ad5be Thomas Thrainer
1514 763ad5be Thomas Thrainer
    # And now execute it for logical storage, on the primary node
1515 d0d7d7cf Thomas Thrainer
    node_uuid = self.instance.primary_node
1516 d0d7d7cf Thomas Thrainer
    result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
1517 e43a624e Bernardo Dal Seno
                                         self.delta, False, False,
1518 e43a624e Bernardo Dal Seno
                                         self.node_es_flags[node_uuid])
1519 1c3231aa Thomas Thrainer
    result.Raise("Grow request failed to node %s" %
1520 1c3231aa Thomas Thrainer
                 self.cfg.GetNodeName(node_uuid))
1521 763ad5be Thomas Thrainer
1522 d0d7d7cf Thomas Thrainer
    self.disk.RecordGrow(self.delta)
1523 d0d7d7cf Thomas Thrainer
    self.cfg.Update(self.instance, feedback_fn)
1524 763ad5be Thomas Thrainer
1525 763ad5be Thomas Thrainer
    # Changes have been recorded, release node lock
1526 5eacbcae Thomas Thrainer
    ReleaseLocks(self, locking.LEVEL_NODE)
1527 763ad5be Thomas Thrainer
1528 763ad5be Thomas Thrainer
    # Downgrade lock while waiting for sync
1529 763ad5be Thomas Thrainer
    self.glm.downgrade(locking.LEVEL_INSTANCE)
1530 763ad5be Thomas Thrainer
1531 763ad5be Thomas Thrainer
    assert wipe_disks ^ (old_disk_size is None)
1532 763ad5be Thomas Thrainer
1533 763ad5be Thomas Thrainer
    if wipe_disks:
1534 d0d7d7cf Thomas Thrainer
      assert self.instance.disks[self.op.disk] == self.disk
1535 763ad5be Thomas Thrainer
1536 763ad5be Thomas Thrainer
      # Wipe newly added disk space
1537 d0d7d7cf Thomas Thrainer
      WipeDisks(self, self.instance,
1538 d0d7d7cf Thomas Thrainer
                disks=[(self.op.disk, self.disk, old_disk_size)])
1539 763ad5be Thomas Thrainer
1540 763ad5be Thomas Thrainer
    if self.op.wait_for_sync:
1541 d0d7d7cf Thomas Thrainer
      disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
1542 763ad5be Thomas Thrainer
      if disk_abort:
1543 763ad5be Thomas Thrainer
        self.LogWarning("Disk syncing has not returned a good status; check"
1544 763ad5be Thomas Thrainer
                        " the instance")
1545 d0d7d7cf Thomas Thrainer
      if not self.instance.disks_active:
1546 d0d7d7cf Thomas Thrainer
        _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
1547 d0d7d7cf Thomas Thrainer
    elif not self.instance.disks_active:
1548 763ad5be Thomas Thrainer
      self.LogWarning("Not shutting down the disk even if the instance is"
1549 763ad5be Thomas Thrainer
                      " not supposed to be running because no wait for"
1550 763ad5be Thomas Thrainer
                      " sync mode was requested")
1551 763ad5be Thomas Thrainer
1552 763ad5be Thomas Thrainer
    assert self.owned_locks(locking.LEVEL_NODE_RES)
1553 d0d7d7cf Thomas Thrainer
    assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
1554 763ad5be Thomas Thrainer
1555 763ad5be Thomas Thrainer
1556 763ad5be Thomas Thrainer
class LUInstanceReplaceDisks(LogicalUnit):
1557 763ad5be Thomas Thrainer
  """Replace the disks of an instance.
1558 763ad5be Thomas Thrainer

1559 763ad5be Thomas Thrainer
  """
1560 763ad5be Thomas Thrainer
  HPATH = "mirrors-replace"
1561 763ad5be Thomas Thrainer
  HTYPE = constants.HTYPE_INSTANCE
1562 763ad5be Thomas Thrainer
  REQ_BGL = False
1563 763ad5be Thomas Thrainer
1564 763ad5be Thomas Thrainer
  def CheckArguments(self):
1565 763ad5be Thomas Thrainer
    """Check arguments.
1566 763ad5be Thomas Thrainer

1567 763ad5be Thomas Thrainer
    """
1568 763ad5be Thomas Thrainer
    if self.op.mode == constants.REPLACE_DISK_CHG:
1569 d0d7d7cf Thomas Thrainer
      if self.op.remote_node is None and self.op.iallocator is None:
1570 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("When changing the secondary either an"
1571 763ad5be Thomas Thrainer
                                   " iallocator script must be used or the"
1572 763ad5be Thomas Thrainer
                                   " new node given", errors.ECODE_INVAL)
1573 763ad5be Thomas Thrainer
      else:
1574 5eacbcae Thomas Thrainer
        CheckIAllocatorOrNode(self, "iallocator", "remote_node")
1575 763ad5be Thomas Thrainer
1576 d0d7d7cf Thomas Thrainer
    elif self.op.remote_node is not None or self.op.iallocator is not None:
1577 763ad5be Thomas Thrainer
      # Not replacing the secondary
1578 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The iallocator and new node options can"
1579 763ad5be Thomas Thrainer
                                 " only be used when changing the"
1580 763ad5be Thomas Thrainer
                                 " secondary node", errors.ECODE_INVAL)
1581 763ad5be Thomas Thrainer
1582 763ad5be Thomas Thrainer
  def ExpandNames(self):
1583 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1584 763ad5be Thomas Thrainer
1585 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODE not in self.needed_locks
1586 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODE_RES not in self.needed_locks
1587 763ad5be Thomas Thrainer
    assert locking.LEVEL_NODEGROUP not in self.needed_locks
1588 763ad5be Thomas Thrainer
1589 763ad5be Thomas Thrainer
    assert self.op.iallocator is None or self.op.remote_node is None, \
1590 763ad5be Thomas Thrainer
      "Conflicting options"
1591 763ad5be Thomas Thrainer
1592 763ad5be Thomas Thrainer
    if self.op.remote_node is not None:
1593 1c3231aa Thomas Thrainer
      (self.op.remote_node_uuid, self.op.remote_node) = \
1594 1c3231aa Thomas Thrainer
        ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
1595 1c3231aa Thomas Thrainer
                              self.op.remote_node)
1596 763ad5be Thomas Thrainer
1597 763ad5be Thomas Thrainer
      # Warning: do not remove the locking of the new secondary here
1598 1bb99a33 Bernardo Dal Seno
      # unless DRBD8Dev.AddChildren is changed to work in parallel;
1599 763ad5be Thomas Thrainer
      # currently it doesn't since parallel invocations of
1600 763ad5be Thomas Thrainer
      # FindUnusedMinor will conflict
1601 1c3231aa Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node_uuid]
1602 763ad5be Thomas Thrainer
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1603 763ad5be Thomas Thrainer
    else:
1604 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE] = []
1605 763ad5be Thomas Thrainer
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1606 763ad5be Thomas Thrainer
1607 763ad5be Thomas Thrainer
      if self.op.iallocator is not None:
1608 763ad5be Thomas Thrainer
        # iallocator will select a new node in the same group
1609 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODEGROUP] = []
1610 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
1611 763ad5be Thomas Thrainer
1612 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE_RES] = []
1613 763ad5be Thomas Thrainer
1614 da4a52a3 Thomas Thrainer
    self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
1615 da4a52a3 Thomas Thrainer
                                   self.op.instance_name, self.op.mode,
1616 1c3231aa Thomas Thrainer
                                   self.op.iallocator, self.op.remote_node_uuid,
1617 763ad5be Thomas Thrainer
                                   self.op.disks, self.op.early_release,
1618 763ad5be Thomas Thrainer
                                   self.op.ignore_ipolicy)
1619 763ad5be Thomas Thrainer
1620 763ad5be Thomas Thrainer
    self.tasklets = [self.replacer]
1621 763ad5be Thomas Thrainer
1622 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1623 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODEGROUP:
1624 1c3231aa Thomas Thrainer
      assert self.op.remote_node_uuid is None
1625 763ad5be Thomas Thrainer
      assert self.op.iallocator is not None
1626 763ad5be Thomas Thrainer
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
1627 763ad5be Thomas Thrainer
1628 763ad5be Thomas Thrainer
      self.share_locks[locking.LEVEL_NODEGROUP] = 1
1629 763ad5be Thomas Thrainer
      # Lock all groups used by instance optimistically; this requires going
1630 763ad5be Thomas Thrainer
      # via the node before it's locked, requiring verification later on
1631 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
1632 da4a52a3 Thomas Thrainer
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
1633 763ad5be Thomas Thrainer
1634 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE:
1635 763ad5be Thomas Thrainer
      if self.op.iallocator is not None:
1636 1c3231aa Thomas Thrainer
        assert self.op.remote_node_uuid is None
1637 763ad5be Thomas Thrainer
        assert not self.needed_locks[locking.LEVEL_NODE]
1638 763ad5be Thomas Thrainer
        assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
1639 763ad5be Thomas Thrainer
1640 763ad5be Thomas Thrainer
        # Lock member nodes of all locked groups
1641 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE] = \
1642 1c3231aa Thomas Thrainer
          [node_uuid
1643 763ad5be Thomas Thrainer
           for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
1644 1c3231aa Thomas Thrainer
           for node_uuid in self.cfg.GetNodeGroup(group_uuid).members]
1645 763ad5be Thomas Thrainer
      else:
1646 763ad5be Thomas Thrainer
        assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1647 763ad5be Thomas Thrainer
1648 763ad5be Thomas Thrainer
        self._LockInstancesNodes()
1649 763ad5be Thomas Thrainer
1650 763ad5be Thomas Thrainer
    elif level == locking.LEVEL_NODE_RES:
1651 763ad5be Thomas Thrainer
      # Reuse node locks
1652 763ad5be Thomas Thrainer
      self.needed_locks[locking.LEVEL_NODE_RES] = \
1653 763ad5be Thomas Thrainer
        self.needed_locks[locking.LEVEL_NODE]
1654 763ad5be Thomas Thrainer
1655 763ad5be Thomas Thrainer
  def BuildHooksEnv(self):
1656 763ad5be Thomas Thrainer
    """Build hooks env.
1657 763ad5be Thomas Thrainer

1658 763ad5be Thomas Thrainer
    This runs on the master, the primary and all the secondaries.
1659 763ad5be Thomas Thrainer

1660 763ad5be Thomas Thrainer
    """
1661 763ad5be Thomas Thrainer
    instance = self.replacer.instance
1662 763ad5be Thomas Thrainer
    env = {
1663 763ad5be Thomas Thrainer
      "MODE": self.op.mode,
1664 763ad5be Thomas Thrainer
      "NEW_SECONDARY": self.op.remote_node,
1665 1c3231aa Thomas Thrainer
      "OLD_SECONDARY": self.cfg.GetNodeName(instance.secondary_nodes[0]),
1666 763ad5be Thomas Thrainer
      }
1667 5eacbcae Thomas Thrainer
    env.update(BuildInstanceHookEnvByObject(self, instance))
1668 763ad5be Thomas Thrainer
    return env
1669 763ad5be Thomas Thrainer
1670 763ad5be Thomas Thrainer
  def BuildHooksNodes(self):
1671 763ad5be Thomas Thrainer
    """Build hooks nodes.
1672 763ad5be Thomas Thrainer

1673 763ad5be Thomas Thrainer
    """
1674 763ad5be Thomas Thrainer
    instance = self.replacer.instance
1675 763ad5be Thomas Thrainer
    nl = [
1676 763ad5be Thomas Thrainer
      self.cfg.GetMasterNode(),
1677 763ad5be Thomas Thrainer
      instance.primary_node,
1678 763ad5be Thomas Thrainer
      ]
1679 1c3231aa Thomas Thrainer
    if self.op.remote_node_uuid is not None:
1680 1c3231aa Thomas Thrainer
      nl.append(self.op.remote_node_uuid)
1681 763ad5be Thomas Thrainer
    return nl, nl
1682 763ad5be Thomas Thrainer
1683 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1684 763ad5be Thomas Thrainer
    """Check prerequisites.
1685 763ad5be Thomas Thrainer

1686 763ad5be Thomas Thrainer
    """
1687 763ad5be Thomas Thrainer
    assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
1688 763ad5be Thomas Thrainer
            self.op.iallocator is None)
1689 763ad5be Thomas Thrainer
1690 763ad5be Thomas Thrainer
    # Verify if node group locks are still correct
1691 763ad5be Thomas Thrainer
    owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
1692 763ad5be Thomas Thrainer
    if owned_groups:
1693 da4a52a3 Thomas Thrainer
      CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
1694 763ad5be Thomas Thrainer
1695 763ad5be Thomas Thrainer
    return LogicalUnit.CheckPrereq(self)
1696 763ad5be Thomas Thrainer
1697 763ad5be Thomas Thrainer
1698 763ad5be Thomas Thrainer
class LUInstanceActivateDisks(NoHooksLU):
1699 763ad5be Thomas Thrainer
  """Bring up an instance's disks.
1700 763ad5be Thomas Thrainer

1701 763ad5be Thomas Thrainer
  """
1702 763ad5be Thomas Thrainer
  REQ_BGL = False
1703 763ad5be Thomas Thrainer
1704 763ad5be Thomas Thrainer
  def ExpandNames(self):
1705 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1706 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1707 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1708 763ad5be Thomas Thrainer
1709 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1710 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1711 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1712 763ad5be Thomas Thrainer
1713 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1714 763ad5be Thomas Thrainer
    """Check prerequisites.
1715 763ad5be Thomas Thrainer

1716 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1717 763ad5be Thomas Thrainer

1718 763ad5be Thomas Thrainer
    """
1719 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1720 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1721 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1722 5eacbcae Thomas Thrainer
    CheckNodeOnline(self, self.instance.primary_node)
1723 763ad5be Thomas Thrainer
1724 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1725 763ad5be Thomas Thrainer
    """Activate the disks.
1726 763ad5be Thomas Thrainer

1727 763ad5be Thomas Thrainer
    """
1728 763ad5be Thomas Thrainer
    disks_ok, disks_info = \
1729 5eacbcae Thomas Thrainer
              AssembleInstanceDisks(self, self.instance,
1730 5eacbcae Thomas Thrainer
                                    ignore_size=self.op.ignore_size)
1731 763ad5be Thomas Thrainer
    if not disks_ok:
1732 763ad5be Thomas Thrainer
      raise errors.OpExecError("Cannot activate block devices")
1733 763ad5be Thomas Thrainer
1734 763ad5be Thomas Thrainer
    if self.op.wait_for_sync:
1735 5eacbcae Thomas Thrainer
      if not WaitForSync(self, self.instance):
1736 da4a52a3 Thomas Thrainer
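        # The disks did not reach a synced state; record them as inactive
        # again before aborting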
        self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
1737 763ad5be Thomas Thrainer
        raise errors.OpExecError("Some disks of the instance are degraded!")
1738 763ad5be Thomas Thrainer
1739 763ad5be Thomas Thrainer
    return disks_info
1740 763ad5be Thomas Thrainer
1741 763ad5be Thomas Thrainer
1742 763ad5be Thomas Thrainer
class LUInstanceDeactivateDisks(NoHooksLU):
1743 763ad5be Thomas Thrainer
  """Shutdown an instance's disks.
1744 763ad5be Thomas Thrainer

1745 763ad5be Thomas Thrainer
  """
1746 763ad5be Thomas Thrainer
  REQ_BGL = False
1747 763ad5be Thomas Thrainer
1748 763ad5be Thomas Thrainer
  def ExpandNames(self):
1749 763ad5be Thomas Thrainer
    self._ExpandAndLockInstance()
1750 763ad5be Thomas Thrainer
    self.needed_locks[locking.LEVEL_NODE] = []
1751 763ad5be Thomas Thrainer
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1752 763ad5be Thomas Thrainer
1753 763ad5be Thomas Thrainer
  def DeclareLocks(self, level):
1754 763ad5be Thomas Thrainer
    if level == locking.LEVEL_NODE:
1755 763ad5be Thomas Thrainer
      self._LockInstancesNodes()
1756 763ad5be Thomas Thrainer
1757 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1758 763ad5be Thomas Thrainer
    """Check prerequisites.
1759 763ad5be Thomas Thrainer

1760 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1761 763ad5be Thomas Thrainer

1762 763ad5be Thomas Thrainer
    """
1763 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1764 763ad5be Thomas Thrainer
    assert self.instance is not None, \
1765 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.op.instance_name
1766 763ad5be Thomas Thrainer
1767 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
1768 763ad5be Thomas Thrainer
    """Deactivate the disks
1769 763ad5be Thomas Thrainer

1770 763ad5be Thomas Thrainer
    """
1771 763ad5be Thomas Thrainer
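    # With force, the disks are shut down unconditionally; otherwise the safe
    # variant first checks that the instance is no longer running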
    if self.op.force:
1772 d0d7d7cf Thomas Thrainer
      ShutdownInstanceDisks(self, self.instance)
1773 763ad5be Thomas Thrainer
    else:
1774 d0d7d7cf Thomas Thrainer
      _SafeShutdownInstanceDisks(self, self.instance)
1775 763ad5be Thomas Thrainer
1776 763ad5be Thomas Thrainer
1777 1c3231aa Thomas Thrainer
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
1778 763ad5be Thomas Thrainer
                               ldisk=False):
1779 763ad5be Thomas Thrainer
  """Check that mirrors are not degraded.
1780 763ad5be Thomas Thrainer

1781 763ad5be Thomas Thrainer
  @attention: The device has to be annotated already.
1782 763ad5be Thomas Thrainer

1783 763ad5be Thomas Thrainer
  The ldisk parameter, if True, will change the test from the
1784 763ad5be Thomas Thrainer
  is_degraded attribute (which represents overall non-ok status for
1785 763ad5be Thomas Thrainer
  the device(s)) to the ldisk_status attribute (representing the local
  storage status).
1786 763ad5be Thomas Thrainer

1787 763ad5be Thomas Thrainer
  """
1788 763ad5be Thomas Thrainer
  result = True
1789 763ad5be Thomas Thrainer
1790 763ad5be Thomas Thrainer
  if on_primary or dev.AssembleOnSecondary():
1791 0c3d9c7c Thomas Thrainer
    rstats = lu.rpc.call_blockdev_find(node_uuid, (dev, instance))
1792 763ad5be Thomas Thrainer
    msg = rstats.fail_msg
1793 763ad5be Thomas Thrainer
    if msg:
1794 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s: %s",
1795 1c3231aa Thomas Thrainer
                    lu.cfg.GetNodeName(node_uuid), msg)
1796 763ad5be Thomas Thrainer
      result = False
1797 763ad5be Thomas Thrainer
    elif not rstats.payload:
1798 1c3231aa Thomas Thrainer
      lu.LogWarning("Can't find disk on node %s", lu.cfg.GetNodeName(node_uuid))
1799 763ad5be Thomas Thrainer
      result = False
1800 763ad5be Thomas Thrainer
    else:
1801 763ad5be Thomas Thrainer
      if ldisk:
1802 763ad5be Thomas Thrainer
        result = result and rstats.payload.ldisk_status == constants.LDS_OKAY
1803 763ad5be Thomas Thrainer
      else:
1804 763ad5be Thomas Thrainer
        result = result and not rstats.payload.is_degraded
1805 763ad5be Thomas Thrainer
1806 763ad5be Thomas Thrainer
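  # Recurse into the child devices; the ldisk flag is not passed on, so
  # children are always checked with the default is_degraded test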
  if dev.children:
1807 763ad5be Thomas Thrainer
    for child in dev.children:
1808 1c3231aa Thomas Thrainer
      result = result and _CheckDiskConsistencyInner(lu, instance, child,
1809 1c3231aa Thomas Thrainer
                                                     node_uuid, on_primary)
1810 763ad5be Thomas Thrainer
1811 763ad5be Thomas Thrainer
  return result
1812 763ad5be Thomas Thrainer
1813 763ad5be Thomas Thrainer
1814 1c3231aa Thomas Thrainer
def CheckDiskConsistency(lu, instance, dev, node_uuid, on_primary, ldisk=False):
1815 763ad5be Thomas Thrainer
  """Wrapper around L{_CheckDiskConsistencyInner}.
1816 763ad5be Thomas Thrainer

1817 763ad5be Thomas Thrainer
  """
1818 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1819 1c3231aa Thomas Thrainer
  return _CheckDiskConsistencyInner(lu, instance, disk, node_uuid, on_primary,
1820 763ad5be Thomas Thrainer
                                    ldisk=ldisk)
1821 763ad5be Thomas Thrainer
1822 763ad5be Thomas Thrainer
1823 1c3231aa Thomas Thrainer
def _BlockdevFind(lu, node_uuid, dev, instance):
1824 763ad5be Thomas Thrainer
  """Wrapper around call_blockdev_find to annotate diskparams.
1825 763ad5be Thomas Thrainer

1826 763ad5be Thomas Thrainer
  @param lu: A reference to the lu object
1827 1c3231aa Thomas Thrainer
  @param node_uuid: The node to call out
1828 763ad5be Thomas Thrainer
  @param dev: The device to find
1829 763ad5be Thomas Thrainer
  @param instance: The instance object the device belongs to
1830 763ad5be Thomas Thrainer
  @returns The result of the rpc call
1831 763ad5be Thomas Thrainer

1832 763ad5be Thomas Thrainer
  """
1833 5eacbcae Thomas Thrainer
  (disk,) = AnnotateDiskParams(instance, [dev], lu.cfg)
1834 0c3d9c7c Thomas Thrainer
  return lu.rpc.call_blockdev_find(node_uuid, (disk, instance))
1835 763ad5be Thomas Thrainer
1836 763ad5be Thomas Thrainer
1837 763ad5be Thomas Thrainer
def _GenerateUniqueNames(lu, exts):
1838 763ad5be Thomas Thrainer
  """Generate a suitable LV name.
1839 763ad5be Thomas Thrainer

1840 763ad5be Thomas Thrainer
  This will generate a logical volume name for the given instance.
1841 763ad5be Thomas Thrainer

1842 763ad5be Thomas Thrainer
  """
1843 763ad5be Thomas Thrainer
  results = []
1844 763ad5be Thomas Thrainer
  for val in exts:
1845 763ad5be Thomas Thrainer
    new_id = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
1846 763ad5be Thomas Thrainer
    results.append("%s%s" % (new_id, val))
1847 763ad5be Thomas Thrainer
  return results
1848 763ad5be Thomas Thrainer
1849 763ad5be Thomas Thrainer
1850 763ad5be Thomas Thrainer
class TLReplaceDisks(Tasklet):
1851 763ad5be Thomas Thrainer
  """Replaces disks for an instance.
1852 763ad5be Thomas Thrainer

1853 763ad5be Thomas Thrainer
  Note: Locking is not within the scope of this class.
1854 763ad5be Thomas Thrainer

1855 763ad5be Thomas Thrainer
  """
1856 da4a52a3 Thomas Thrainer
  def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
1857 da4a52a3 Thomas Thrainer
               remote_node_uuid, disks, early_release, ignore_ipolicy):
1858 763ad5be Thomas Thrainer
    """Initializes this class.
1859 763ad5be Thomas Thrainer

1860 763ad5be Thomas Thrainer
    """
1861 763ad5be Thomas Thrainer
    Tasklet.__init__(self, lu)
1862 763ad5be Thomas Thrainer
1863 763ad5be Thomas Thrainer
    # Parameters
1864 da4a52a3 Thomas Thrainer
    self.instance_uuid = instance_uuid
1865 763ad5be Thomas Thrainer
    self.instance_name = instance_name
1866 763ad5be Thomas Thrainer
    self.mode = mode
1867 763ad5be Thomas Thrainer
    self.iallocator_name = iallocator_name
1868 1c3231aa Thomas Thrainer
    self.remote_node_uuid = remote_node_uuid
1869 763ad5be Thomas Thrainer
    self.disks = disks
1870 763ad5be Thomas Thrainer
    self.early_release = early_release
1871 763ad5be Thomas Thrainer
    self.ignore_ipolicy = ignore_ipolicy
1872 763ad5be Thomas Thrainer
1873 763ad5be Thomas Thrainer
    # Runtime data
1874 763ad5be Thomas Thrainer
    self.instance = None
1875 1c3231aa Thomas Thrainer
    self.new_node_uuid = None
1876 1c3231aa Thomas Thrainer
    self.target_node_uuid = None
1877 1c3231aa Thomas Thrainer
    self.other_node_uuid = None
1878 763ad5be Thomas Thrainer
    self.remote_node_info = None
1879 763ad5be Thomas Thrainer
    self.node_secondary_ip = None
1880 763ad5be Thomas Thrainer
1881 763ad5be Thomas Thrainer
  @staticmethod
1882 da4a52a3 Thomas Thrainer
  def _RunAllocator(lu, iallocator_name, instance_uuid,
1883 1c3231aa Thomas Thrainer
                    relocate_from_node_uuids):
1884 763ad5be Thomas Thrainer
    """Compute a new secondary node using an IAllocator.
1885 763ad5be Thomas Thrainer

1886 763ad5be Thomas Thrainer
    """
1887 1c3231aa Thomas Thrainer
    req = iallocator.IAReqRelocate(
1888 da4a52a3 Thomas Thrainer
          inst_uuid=instance_uuid,
1889 1c3231aa Thomas Thrainer
          relocate_from_node_uuids=list(relocate_from_node_uuids))
1890 763ad5be Thomas Thrainer
    ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
1891 763ad5be Thomas Thrainer
1892 763ad5be Thomas Thrainer
    ial.Run(iallocator_name)
1893 763ad5be Thomas Thrainer
1894 763ad5be Thomas Thrainer
    if not ial.success:
1895 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can't compute nodes using iallocator '%s':"
1896 763ad5be Thomas Thrainer
                                 " %s" % (iallocator_name, ial.info),
1897 763ad5be Thomas Thrainer
                                 errors.ECODE_NORES)
1898 763ad5be Thomas Thrainer
1899 763ad5be Thomas Thrainer
    remote_node_name = ial.result[0]
1900 1c3231aa Thomas Thrainer
    remote_node = lu.cfg.GetNodeInfoByName(remote_node_name)
1901 1c3231aa Thomas Thrainer
1902 1c3231aa Thomas Thrainer
    if remote_node is None:
1903 1c3231aa Thomas Thrainer
      raise errors.OpPrereqError("Node %s not found in configuration" %
1904 1c3231aa Thomas Thrainer
                                 remote_node_name, errors.ECODE_NOENT)
1905 763ad5be Thomas Thrainer
1906 763ad5be Thomas Thrainer
    lu.LogInfo("Selected new secondary for instance '%s': %s",
1907 da4a52a3 Thomas Thrainer
               instance_uuid, remote_node_name)
1908 763ad5be Thomas Thrainer
1909 1c3231aa Thomas Thrainer
    return remote_node.uuid
1910 763ad5be Thomas Thrainer
1911 1c3231aa Thomas Thrainer
  def _FindFaultyDisks(self, node_uuid):
1912 5eacbcae Thomas Thrainer
    """Wrapper for L{FindFaultyInstanceDisks}.
1913 763ad5be Thomas Thrainer

1914 763ad5be Thomas Thrainer
    """
1915 5eacbcae Thomas Thrainer
    return FindFaultyInstanceDisks(self.cfg, self.rpc, self.instance,
1916 1c3231aa Thomas Thrainer
                                   node_uuid, True)
1917 763ad5be Thomas Thrainer
1918 763ad5be Thomas Thrainer
  def _CheckDisksActivated(self, instance):
1919 763ad5be Thomas Thrainer
    """Checks if the instance disks are activated.
1920 763ad5be Thomas Thrainer

1921 763ad5be Thomas Thrainer
    @param instance: The instance to check disks
1922 763ad5be Thomas Thrainer
    @return: True if they are activated, False otherwise
1923 763ad5be Thomas Thrainer

1924 763ad5be Thomas Thrainer
    """
1925 1c3231aa Thomas Thrainer
    node_uuids = instance.all_nodes
1926 763ad5be Thomas Thrainer
1927 763ad5be Thomas Thrainer
    for idx, dev in enumerate(instance.disks):
1928 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
1929 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
1930 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
1931 763ad5be Thomas Thrainer
1932 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, instance)
1933 763ad5be Thomas Thrainer
1934 763ad5be Thomas Thrainer
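        # An offline node is simply skipped; any other failure or a missing
        # disk means the instance disks are not fully activated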
        if result.offline:
1935 763ad5be Thomas Thrainer
          continue
1936 763ad5be Thomas Thrainer
        elif result.fail_msg or not result.payload:
1937 763ad5be Thomas Thrainer
          return False
1938 763ad5be Thomas Thrainer
1939 763ad5be Thomas Thrainer
    return True
1940 763ad5be Thomas Thrainer
1941 763ad5be Thomas Thrainer
  def CheckPrereq(self):
1942 763ad5be Thomas Thrainer
    """Check prerequisites.
1943 763ad5be Thomas Thrainer

1944 763ad5be Thomas Thrainer
    This checks that the instance is in the cluster.
1945 763ad5be Thomas Thrainer

1946 763ad5be Thomas Thrainer
    """
1947 da4a52a3 Thomas Thrainer
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
1948 d0d7d7cf Thomas Thrainer
    assert self.instance is not None, \
1949 763ad5be Thomas Thrainer
      "Cannot retrieve locked instance %s" % self.instance_name
1950 763ad5be Thomas Thrainer
1951 d0d7d7cf Thomas Thrainer
    if self.instance.disk_template != constants.DT_DRBD8:
1952 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
1953 763ad5be Thomas Thrainer
                                 " instances", errors.ECODE_INVAL)
1954 763ad5be Thomas Thrainer
1955 d0d7d7cf Thomas Thrainer
    if len(self.instance.secondary_nodes) != 1:
1956 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The instance has a strange layout,"
1957 763ad5be Thomas Thrainer
                                 " expected one secondary but found %d" %
1958 d0d7d7cf Thomas Thrainer
                                 len(self.instance.secondary_nodes),
1959 763ad5be Thomas Thrainer
                                 errors.ECODE_FAULT)
1960 763ad5be Thomas Thrainer
1961 d0d7d7cf Thomas Thrainer
    secondary_node_uuid = self.instance.secondary_nodes[0]
1962 763ad5be Thomas Thrainer
1963 763ad5be Thomas Thrainer
    if self.iallocator_name is None:
1964 1c3231aa Thomas Thrainer
      remote_node_uuid = self.remote_node_uuid
1965 763ad5be Thomas Thrainer
    else:
1966 1c3231aa Thomas Thrainer
      remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
1967 da4a52a3 Thomas Thrainer
                                            self.instance.uuid,
1968 d0d7d7cf Thomas Thrainer
                                            self.instance.secondary_nodes)
1969 763ad5be Thomas Thrainer
1970 1c3231aa Thomas Thrainer
    if remote_node_uuid is None:
1971 763ad5be Thomas Thrainer
      self.remote_node_info = None
1972 763ad5be Thomas Thrainer
    else:
1973 1c3231aa Thomas Thrainer
      assert remote_node_uuid in self.lu.owned_locks(locking.LEVEL_NODE), \
1974 1c3231aa Thomas Thrainer
             "Remote node '%s' is not locked" % remote_node_uuid
1975 763ad5be Thomas Thrainer
1976 1c3231aa Thomas Thrainer
      self.remote_node_info = self.cfg.GetNodeInfo(remote_node_uuid)
1977 763ad5be Thomas Thrainer
      assert self.remote_node_info is not None, \
1978 1c3231aa Thomas Thrainer
        "Cannot retrieve locked node %s" % remote_node_uuid
1979 763ad5be Thomas Thrainer
1980 1c3231aa Thomas Thrainer
    if remote_node_uuid == self.instance.primary_node:
1981 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is the primary node of"
1982 763ad5be Thomas Thrainer
                                 " the instance", errors.ECODE_INVAL)
1983 763ad5be Thomas Thrainer
1984 1c3231aa Thomas Thrainer
    if remote_node_uuid == secondary_node_uuid:
1985 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("The specified node is already the"
1986 763ad5be Thomas Thrainer
                                 " secondary node of the instance",
1987 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
1988 763ad5be Thomas Thrainer
1989 763ad5be Thomas Thrainer
    if self.disks and self.mode in (constants.REPLACE_DISK_AUTO,
1990 763ad5be Thomas Thrainer
                                    constants.REPLACE_DISK_CHG):
1991 763ad5be Thomas Thrainer
      raise errors.OpPrereqError("Cannot specify disks to be replaced",
1992 763ad5be Thomas Thrainer
                                 errors.ECODE_INVAL)
1993 763ad5be Thomas Thrainer
1994 763ad5be Thomas Thrainer
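    # Automatic mode: find which side has faulty disks and replace only those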
    if self.mode == constants.REPLACE_DISK_AUTO:
1995 d0d7d7cf Thomas Thrainer
      if not self._CheckDisksActivated(self.instance):
1996 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Please run activate-disks on instance %s"
1997 763ad5be Thomas Thrainer
                                   " first" % self.instance_name,
1998 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
1999 d0d7d7cf Thomas Thrainer
      faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
2000 1c3231aa Thomas Thrainer
      faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
2001 763ad5be Thomas Thrainer
2002 763ad5be Thomas Thrainer
      if faulty_primary and faulty_secondary:
2003 763ad5be Thomas Thrainer
        raise errors.OpPrereqError("Instance %s has faulty disks on more than"
2004 763ad5be Thomas Thrainer
                                   " one node and can not be repaired"
2005 763ad5be Thomas Thrainer
                                   " automatically" % self.instance_name,
2006 763ad5be Thomas Thrainer
                                   errors.ECODE_STATE)
2007 763ad5be Thomas Thrainer
2008 763ad5be Thomas Thrainer
      if faulty_primary:
2009 763ad5be Thomas Thrainer
        self.disks = faulty_primary
2010 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2011 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2012 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2013 763ad5be Thomas Thrainer
      elif faulty_secondary:
2014 763ad5be Thomas Thrainer
        self.disks = faulty_secondary
2015 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2016 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2017 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2018 763ad5be Thomas Thrainer
      else:
2019 763ad5be Thomas Thrainer
        self.disks = []
2020 763ad5be Thomas Thrainer
        check_nodes = []
2021 763ad5be Thomas Thrainer
2022 763ad5be Thomas Thrainer
    else:
2023 763ad5be Thomas Thrainer
      # Non-automatic modes
2024 763ad5be Thomas Thrainer
      if self.mode == constants.REPLACE_DISK_PRI:
2025 d0d7d7cf Thomas Thrainer
        self.target_node_uuid = self.instance.primary_node
2026 1c3231aa Thomas Thrainer
        self.other_node_uuid = secondary_node_uuid
2027 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2028 763ad5be Thomas Thrainer
2029 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_SEC:
2030 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2031 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2032 1c3231aa Thomas Thrainer
        check_nodes = [self.target_node_uuid, self.other_node_uuid]
2033 763ad5be Thomas Thrainer
2034 763ad5be Thomas Thrainer
      elif self.mode == constants.REPLACE_DISK_CHG:
2035 1c3231aa Thomas Thrainer
        self.new_node_uuid = remote_node_uuid
2036 d0d7d7cf Thomas Thrainer
        self.other_node_uuid = self.instance.primary_node
2037 1c3231aa Thomas Thrainer
        self.target_node_uuid = secondary_node_uuid
2038 1c3231aa Thomas Thrainer
        check_nodes = [self.new_node_uuid, self.other_node_uuid]
2039 763ad5be Thomas Thrainer
2040 1c3231aa Thomas Thrainer
        CheckNodeNotDrained(self.lu, remote_node_uuid)
2041 1c3231aa Thomas Thrainer
        CheckNodeVmCapable(self.lu, remote_node_uuid)
2042 763ad5be Thomas Thrainer
2043 1c3231aa Thomas Thrainer
        old_node_info = self.cfg.GetNodeInfo(secondary_node_uuid)
2044 763ad5be Thomas Thrainer
        assert old_node_info is not None
2045 763ad5be Thomas Thrainer
        if old_node_info.offline and not self.early_release:
2046 763ad5be Thomas Thrainer
          # doesn't make sense to delay the release
2047 763ad5be Thomas Thrainer
          self.early_release = True
2048 763ad5be Thomas Thrainer
          self.lu.LogInfo("Old secondary %s is offline, automatically enabling"
2049 1c3231aa Thomas Thrainer
                          " early-release mode", secondary_node_uuid)
2050 763ad5be Thomas Thrainer
2051 763ad5be Thomas Thrainer
      else:
2052 763ad5be Thomas Thrainer
        raise errors.ProgrammerError("Unhandled disk replace mode (%s)" %
2053 763ad5be Thomas Thrainer
                                     self.mode)
2054 763ad5be Thomas Thrainer
2055 763ad5be Thomas Thrainer
      # If not specified all disks should be replaced
2056 763ad5be Thomas Thrainer
      if not self.disks:
2057 763ad5be Thomas Thrainer
        self.disks = range(len(self.instance.disks))
2058 763ad5be Thomas Thrainer
2059 763ad5be Thomas Thrainer
    # TODO: This is ugly, but right now we can't distinguish between an
2060 763ad5be Thomas Thrainer
    # internally submitted opcode and an external one. We should fix that.
2061 763ad5be Thomas Thrainer
    if self.remote_node_info:
2062 763ad5be Thomas Thrainer
      # We are changing the node; let's verify it still meets instance policy
2063 763ad5be Thomas Thrainer
      new_group_info = self.cfg.GetNodeGroup(self.remote_node_info.group)
2064 763ad5be Thomas Thrainer
      cluster = self.cfg.GetClusterInfo()
2065 763ad5be Thomas Thrainer
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2066 763ad5be Thomas Thrainer
                                                              new_group_info)
2067 d0d7d7cf Thomas Thrainer
      CheckTargetNodeIPolicy(self, ipolicy, self.instance,
2068 d0d7d7cf Thomas Thrainer
                             self.remote_node_info, self.cfg,
2069 d0d7d7cf Thomas Thrainer
                             ignore=self.ignore_ipolicy)
2070 763ad5be Thomas Thrainer
2071 1c3231aa Thomas Thrainer
    for node_uuid in check_nodes:
2072 1c3231aa Thomas Thrainer
      CheckNodeOnline(self.lu, node_uuid)
2073 763ad5be Thomas Thrainer
2074 1c3231aa Thomas Thrainer
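    # Only the nodes involved in this replacement need to stay locked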
    touched_nodes = frozenset(node_uuid for node_uuid in [self.new_node_uuid,
2075 1c3231aa Thomas Thrainer
                                                          self.other_node_uuid,
2076 1c3231aa Thomas Thrainer
                                                          self.target_node_uuid]
2077 1c3231aa Thomas Thrainer
                              if node_uuid is not None)
2078 763ad5be Thomas Thrainer
2079 763ad5be Thomas Thrainer
    # Release unneeded node and node resource locks
2080 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
2081 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
2082 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
2083 763ad5be Thomas Thrainer
2084 763ad5be Thomas Thrainer
    # Release any owned node group
2085 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
2086 763ad5be Thomas Thrainer
2087 763ad5be Thomas Thrainer
    # Check whether disks are valid
2088 763ad5be Thomas Thrainer
    for disk_idx in self.disks:
2089 d0d7d7cf Thomas Thrainer
      self.instance.FindDisk(disk_idx)
2090 763ad5be Thomas Thrainer
2091 763ad5be Thomas Thrainer
    # Get secondary node IP addresses
2092 1c3231aa Thomas Thrainer
    self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
2093 763ad5be Thomas Thrainer
                                  in self.cfg.GetMultiNodeInfo(touched_nodes))
2094 763ad5be Thomas Thrainer
2095 763ad5be Thomas Thrainer
  def Exec(self, feedback_fn):
2096 763ad5be Thomas Thrainer
    """Execute disk replacement.
2097 763ad5be Thomas Thrainer

2098 763ad5be Thomas Thrainer
    This dispatches the disk replacement to the appropriate handler.
2099 763ad5be Thomas Thrainer

2100 763ad5be Thomas Thrainer
    """
2101 763ad5be Thomas Thrainer
    if __debug__:
2102 763ad5be Thomas Thrainer
      # Verify owned locks before starting operation
2103 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE)
2104 763ad5be Thomas Thrainer
      assert set(owned_nodes) == set(self.node_secondary_ip), \
2105 763ad5be Thomas Thrainer
          ("Incorrect node locks, owning %s, expected %s" %
2106 763ad5be Thomas Thrainer
           (owned_nodes, self.node_secondary_ip.keys()))
2107 763ad5be Thomas Thrainer
      assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
2108 763ad5be Thomas Thrainer
              self.lu.owned_locks(locking.LEVEL_NODE_RES))
2109 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
2110 763ad5be Thomas Thrainer
2111 763ad5be Thomas Thrainer
      owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
2112 763ad5be Thomas Thrainer
      assert list(owned_instances) == [self.instance_name], \
2113 763ad5be Thomas Thrainer
          "Instance '%s' not locked" % self.instance_name
2114 763ad5be Thomas Thrainer
2115 763ad5be Thomas Thrainer
      assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
2116 763ad5be Thomas Thrainer
          "Should not own any node group lock at this point"
2117 763ad5be Thomas Thrainer
2118 763ad5be Thomas Thrainer
    if not self.disks:
2119 763ad5be Thomas Thrainer
      feedback_fn("No disks need replacement for instance '%s'" %
2120 763ad5be Thomas Thrainer
                  self.instance.name)
2121 763ad5be Thomas Thrainer
      return
2122 763ad5be Thomas Thrainer
2123 763ad5be Thomas Thrainer
    feedback_fn("Replacing disk(s) %s for instance '%s'" %
2124 763ad5be Thomas Thrainer
                (utils.CommaJoin(self.disks), self.instance.name))
2125 1c3231aa Thomas Thrainer
    feedback_fn("Current primary node: %s" %
2126 1c3231aa Thomas Thrainer
                self.cfg.GetNodeName(self.instance.primary_node))
2127 763ad5be Thomas Thrainer
    feedback_fn("Current seconary node: %s" %
2128 1c3231aa Thomas Thrainer
                utils.CommaJoin(self.cfg.GetNodeNames(
2129 1c3231aa Thomas Thrainer
                                  self.instance.secondary_nodes)))
2130 763ad5be Thomas Thrainer
2131 1d4a4b26 Thomas Thrainer
    activate_disks = not self.instance.disks_active
2132 763ad5be Thomas Thrainer
2133 763ad5be Thomas Thrainer
    # Activate the instance disks if we're replacing them on a down instance
2134 763ad5be Thomas Thrainer
    if activate_disks:
2135 5eacbcae Thomas Thrainer
      StartInstanceDisks(self.lu, self.instance, True)
2136 763ad5be Thomas Thrainer
2137 763ad5be Thomas Thrainer
    try:
2138 763ad5be Thomas Thrainer
      # Should we replace the secondary node?
2139 1c3231aa Thomas Thrainer
      if self.new_node_uuid is not None:
2140 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8Secondary
2141 763ad5be Thomas Thrainer
      else:
2142 763ad5be Thomas Thrainer
        fn = self._ExecDrbd8DiskOnly
2143 763ad5be Thomas Thrainer
2144 763ad5be Thomas Thrainer
      result = fn(feedback_fn)
2145 763ad5be Thomas Thrainer
    finally:
2146 763ad5be Thomas Thrainer
      # Deactivate the instance disks if we're replacing them on a
2147 763ad5be Thomas Thrainer
      # down instance
2148 763ad5be Thomas Thrainer
      if activate_disks:
2149 763ad5be Thomas Thrainer
        _SafeShutdownInstanceDisks(self.lu, self.instance)
2150 763ad5be Thomas Thrainer
2151 763ad5be Thomas Thrainer
    assert not self.lu.owned_locks(locking.LEVEL_NODE)
2152 763ad5be Thomas Thrainer
2153 763ad5be Thomas Thrainer
    if __debug__:
2154 763ad5be Thomas Thrainer
      # Verify owned locks
2155 763ad5be Thomas Thrainer
      owned_nodes = self.lu.owned_locks(locking.LEVEL_NODE_RES)
2156 763ad5be Thomas Thrainer
      nodes = frozenset(self.node_secondary_ip)
2157 763ad5be Thomas Thrainer
      assert ((self.early_release and not owned_nodes) or
2158 763ad5be Thomas Thrainer
              (not self.early_release and not (set(owned_nodes) - nodes))), \
2159 763ad5be Thomas Thrainer
        ("Not owning the correct locks, early_release=%s, owned=%r,"
2160 763ad5be Thomas Thrainer
         " nodes=%r" % (self.early_release, owned_nodes, nodes))
2161 763ad5be Thomas Thrainer
2162 763ad5be Thomas Thrainer
    return result
2163 763ad5be Thomas Thrainer
2164 1c3231aa Thomas Thrainer
  def _CheckVolumeGroup(self, node_uuids):
2165 763ad5be Thomas Thrainer
    self.lu.LogInfo("Checking volume groups")
2166 763ad5be Thomas Thrainer
2167 763ad5be Thomas Thrainer
    vgname = self.cfg.GetVGName()
2168 763ad5be Thomas Thrainer
2169 763ad5be Thomas Thrainer
    # Make sure volume group exists on all involved nodes
2170 1c3231aa Thomas Thrainer
    results = self.rpc.call_vg_list(node_uuids)
2171 763ad5be Thomas Thrainer
    if not results:
2172 763ad5be Thomas Thrainer
      raise errors.OpExecError("Can't list volume groups on the nodes")
2173 763ad5be Thomas Thrainer
2174 1c3231aa Thomas Thrainer
    for node_uuid in node_uuids:
2175 1c3231aa Thomas Thrainer
      res = results[node_uuid]
2176 1c3231aa Thomas Thrainer
      res.Raise("Error checking node %s" % self.cfg.GetNodeName(node_uuid))
2177 763ad5be Thomas Thrainer
      if vgname not in res.payload:
2178 763ad5be Thomas Thrainer
        raise errors.OpExecError("Volume group '%s' not found on node %s" %
2179 1c3231aa Thomas Thrainer
                                 (vgname, self.cfg.GetNodeName(node_uuid)))
2180 763ad5be Thomas Thrainer
2181 1c3231aa Thomas Thrainer
  def _CheckDisksExistence(self, node_uuids):
2182 763ad5be Thomas Thrainer
    # Check disk existence
2183 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2184 763ad5be Thomas Thrainer
      if idx not in self.disks:
2185 763ad5be Thomas Thrainer
        continue
2186 763ad5be Thomas Thrainer
2187 1c3231aa Thomas Thrainer
      for node_uuid in node_uuids:
2188 1c3231aa Thomas Thrainer
        self.lu.LogInfo("Checking disk/%d on %s", idx,
2189 1c3231aa Thomas Thrainer
                        self.cfg.GetNodeName(node_uuid))
2190 763ad5be Thomas Thrainer
2191 1c3231aa Thomas Thrainer
        result = _BlockdevFind(self, node_uuid, dev, self.instance)
2192 763ad5be Thomas Thrainer
2193 763ad5be Thomas Thrainer
        msg = result.fail_msg
2194 763ad5be Thomas Thrainer
        if msg or not result.payload:
2195 763ad5be Thomas Thrainer
          if not msg:
2196 763ad5be Thomas Thrainer
            msg = "disk not found"
2197 34ea8da3 Michele Tartara
          if not self._CheckDisksActivated(self.instance):
2198 34ea8da3 Michele Tartara
            extra_hint = ("\nDisks seem to be not properly activated. Try"
2199 34ea8da3 Michele Tartara
                          " running activate-disks on the instance before"
2200 34ea8da3 Michele Tartara
                          " using replace-disks.")
2201 34ea8da3 Michele Tartara
          else:
2202 34ea8da3 Michele Tartara
            extra_hint = ""
2203 34ea8da3 Michele Tartara
          raise errors.OpExecError("Can't find disk/%d on node %s: %s%s" %
2204 f9dfa8df Klaus Aehlig
                                   (idx, self.cfg.GetNodeName(node_uuid), msg,
2205 f9dfa8df Klaus Aehlig
                                    extra_hint))
2206 763ad5be Thomas Thrainer
2207 1c3231aa Thomas Thrainer
  def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
2208 763ad5be Thomas Thrainer
    for idx, dev in enumerate(self.instance.disks):
2209 763ad5be Thomas Thrainer
      if idx not in self.disks:
2210 763ad5be Thomas Thrainer
        continue
2211 763ad5be Thomas Thrainer
2212 763ad5be Thomas Thrainer
      self.lu.LogInfo("Checking disk/%d consistency on node %s" %
2213 1c3231aa Thomas Thrainer
                      (idx, self.cfg.GetNodeName(node_uuid)))
2214 763ad5be Thomas Thrainer
2215 1c3231aa Thomas Thrainer
      if not CheckDiskConsistency(self.lu, self.instance, dev, node_uuid,
2216 5eacbcae Thomas Thrainer
                                  on_primary, ldisk=ldisk):
2217 763ad5be Thomas Thrainer
        raise errors.OpExecError("Node %s has degraded storage, unsafe to"
2218 763ad5be Thomas Thrainer
                                 " replace disks for instance %s" %
2219 1c3231aa Thomas Thrainer
                                 (self.cfg.GetNodeName(node_uuid),
2220 1c3231aa Thomas Thrainer
                                  self.instance.name))
2221 763ad5be Thomas Thrainer
2222 1c3231aa Thomas Thrainer
  def _CreateNewStorage(self, node_uuid):
2223 763ad5be Thomas Thrainer
    """Create new storage on the primary or secondary node.
2224 763ad5be Thomas Thrainer

2225 763ad5be Thomas Thrainer
    This is only used for same-node replaces, not for changing the
2226 763ad5be Thomas Thrainer
    secondary node, hence we don't want to modify the existing disk.
2227 763ad5be Thomas Thrainer

2228 763ad5be Thomas Thrainer
    """
2229 763ad5be Thomas Thrainer
    iv_names = {}
2230 763ad5be Thomas Thrainer
2231 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2232 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2233 763ad5be Thomas Thrainer
      if idx not in self.disks:
2234 763ad5be Thomas Thrainer
        continue
2235 763ad5be Thomas Thrainer
2236 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding storage on %s for disk/%d",
2237 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(node_uuid), idx)
2238 763ad5be Thomas Thrainer
2239 763ad5be Thomas Thrainer
      lv_names = [".disk%d_%s" % (idx, suffix) for suffix in ["data", "meta"]]
2240 763ad5be Thomas Thrainer
      names = _GenerateUniqueNames(self.lu, lv_names)
2241 763ad5be Thomas Thrainer
2242 763ad5be Thomas Thrainer
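      # Each DRBD8 disk is backed by two LVs: a data volume and a small
      # metadata volume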
      (data_disk, meta_disk) = dev.children
2243 763ad5be Thomas Thrainer
      vg_data = data_disk.logical_id[0]
2244 cd3b4ff4 Helga Velroyen
      lv_data = objects.Disk(dev_type=constants.DT_PLAIN, size=dev.size,
2245 763ad5be Thomas Thrainer
                             logical_id=(vg_data, names[0]),
2246 763ad5be Thomas Thrainer
                             params=data_disk.params)
2247 763ad5be Thomas Thrainer
      vg_meta = meta_disk.logical_id[0]
2248 cd3b4ff4 Helga Velroyen
      lv_meta = objects.Disk(dev_type=constants.DT_PLAIN,
2249 763ad5be Thomas Thrainer
                             size=constants.DRBD_META_SIZE,
2250 763ad5be Thomas Thrainer
                             logical_id=(vg_meta, names[1]),
2251 763ad5be Thomas Thrainer
                             params=meta_disk.params)
2252 763ad5be Thomas Thrainer
2253 763ad5be Thomas Thrainer
      new_lvs = [lv_data, lv_meta]
2254 763ad5be Thomas Thrainer
      old_lvs = [child.Copy() for child in dev.children]
2255 763ad5be Thomas Thrainer
      iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
2256 1c3231aa Thomas Thrainer
      excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg, node_uuid)
2257 763ad5be Thomas Thrainer
2258 763ad5be Thomas Thrainer
      # we pass force_create=True to force the LVM creation
2259 763ad5be Thomas Thrainer
      for new_lv in new_lvs:
2260 f2b58d93 Thomas Thrainer
        try:
2261 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
2262 f2b58d93 Thomas Thrainer
                               GetInstanceInfoText(self.instance), False,
2263 f2b58d93 Thomas Thrainer
                               excl_stor)
2264 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2265 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2266 763ad5be Thomas Thrainer
2267 763ad5be Thomas Thrainer
    return iv_names
2268 763ad5be Thomas Thrainer
2269 1c3231aa Thomas Thrainer
  def _CheckDevices(self, node_uuid, iv_names):
2270 763ad5be Thomas Thrainer
    for name, (dev, _, _) in iv_names.iteritems():
2271 1c3231aa Thomas Thrainer
      result = _BlockdevFind(self, node_uuid, dev, self.instance)
2272 763ad5be Thomas Thrainer
2273 763ad5be Thomas Thrainer
      msg = result.fail_msg
2274 763ad5be Thomas Thrainer
      if msg or not result.payload:
2275 763ad5be Thomas Thrainer
        if not msg:
2276 763ad5be Thomas Thrainer
          msg = "disk not found"
2277 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't find DRBD device %s: %s" %
2278 763ad5be Thomas Thrainer
                                 (name, msg))
2279 763ad5be Thomas Thrainer
2280 763ad5be Thomas Thrainer
      if result.payload.is_degraded:
2281 763ad5be Thomas Thrainer
        raise errors.OpExecError("DRBD device %s is degraded!" % name)
2282 763ad5be Thomas Thrainer
2283 1c3231aa Thomas Thrainer
  def _RemoveOldStorage(self, node_uuid, iv_names):
2284 763ad5be Thomas Thrainer
    for name, (_, old_lvs, _) in iv_names.iteritems():
2285 763ad5be Thomas Thrainer
      self.lu.LogInfo("Remove logical volumes for %s", name)
2286 763ad5be Thomas Thrainer
2287 763ad5be Thomas Thrainer
      for lv in old_lvs:
2288 0c3d9c7c Thomas Thrainer
        msg = self.rpc.call_blockdev_remove(node_uuid, (lv, self.instance)) \
2289 0c3d9c7c Thomas Thrainer
                .fail_msg
2290 763ad5be Thomas Thrainer
        if msg:
2291 763ad5be Thomas Thrainer
          self.lu.LogWarning("Can't remove old LV: %s", msg,
2292 763ad5be Thomas Thrainer
                             hint="remove unused LVs manually")
2293 763ad5be Thomas Thrainer
2294 763ad5be Thomas Thrainer
  def _ExecDrbd8DiskOnly(self, feedback_fn): # pylint: disable=W0613
2295 763ad5be Thomas Thrainer
    """Replace a disk on the primary or secondary for DRBD 8.
2296 763ad5be Thomas Thrainer

2297 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2298 763ad5be Thomas Thrainer

2299 763ad5be Thomas Thrainer
      1. for each disk to be replaced:
2300 763ad5be Thomas Thrainer

2301 763ad5be Thomas Thrainer
        1. create new LVs on the target node with unique names
2302 763ad5be Thomas Thrainer
        1. detach old LVs from the drbd device
2303 763ad5be Thomas Thrainer
        1. rename old LVs to name_replaced.<time_t>
2304 763ad5be Thomas Thrainer
        1. rename new LVs to old LVs
2305 763ad5be Thomas Thrainer
        1. attach the new LVs (with the old names now) to the drbd device
2306 763ad5be Thomas Thrainer

2307 763ad5be Thomas Thrainer
      1. wait for sync across all devices
2308 763ad5be Thomas Thrainer

2309 763ad5be Thomas Thrainer
      1. for each modified disk:
2310 763ad5be Thomas Thrainer

2311 763ad5be Thomas Thrainer
        1. remove old LVs (which have the name name_replaced.<time_t>)
2312 763ad5be Thomas Thrainer

2313 763ad5be Thomas Thrainer
    Failures are not very well handled.
2314 763ad5be Thomas Thrainer

2315 763ad5be Thomas Thrainer
    """
2316 763ad5be Thomas Thrainer
    steps_total = 6
2317 763ad5be Thomas Thrainer
2318 763ad5be Thomas Thrainer
    # Step: check device activation
2319 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2320 1c3231aa Thomas Thrainer
    self._CheckDisksExistence([self.other_node_uuid, self.target_node_uuid])
2321 1c3231aa Thomas Thrainer
    self._CheckVolumeGroup([self.target_node_uuid, self.other_node_uuid])
2322 763ad5be Thomas Thrainer
2323 763ad5be Thomas Thrainer
    # Step: check other node consistency
2324 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2325 1c3231aa Thomas Thrainer
    self._CheckDisksConsistency(
2326 1c3231aa Thomas Thrainer
      self.other_node_uuid, self.other_node_uuid == self.instance.primary_node,
2327 1c3231aa Thomas Thrainer
      False)
2328 763ad5be Thomas Thrainer
2329 763ad5be Thomas Thrainer
    # Step: create new storage
2330 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2331 1c3231aa Thomas Thrainer
    iv_names = self._CreateNewStorage(self.target_node_uuid)
2332 763ad5be Thomas Thrainer
2333 763ad5be Thomas Thrainer
    # Step: for each lv, detach+rename*2+attach
2334 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2335 763ad5be Thomas Thrainer
    for dev, old_lvs, new_lvs in iv_names.itervalues():
2336 763ad5be Thomas Thrainer
      self.lu.LogInfo("Detaching %s drbd from local storage", dev.iv_name)
2337 763ad5be Thomas Thrainer
2338 0c3d9c7c Thomas Thrainer
      result = self.rpc.call_blockdev_removechildren(self.target_node_uuid,
2339 0c3d9c7c Thomas Thrainer
                                                     (dev, self.instance),
2340 0c3d9c7c Thomas Thrainer
                                                     (old_lvs, self.instance))
2341 763ad5be Thomas Thrainer
      result.Raise("Can't detach drbd from local storage on node"
2342 1c3231aa Thomas Thrainer
                   " %s for device %s" %
2343 1c3231aa Thomas Thrainer
                   (self.cfg.GetNodeName(self.target_node_uuid), dev.iv_name))
2344 763ad5be Thomas Thrainer
      #dev.children = []
2345 763ad5be Thomas Thrainer
      #cfg.Update(instance)
2346 763ad5be Thomas Thrainer
2347 763ad5be Thomas Thrainer
      # ok, we created the new LVs, so now we know we have the needed
2348 763ad5be Thomas Thrainer
      # storage; as such, we proceed on the target node to rename
2349 763ad5be Thomas Thrainer
      # old_lv to old_lv_replaced-<time_t>, and new_lv to old_lv; note that
      # we rename LVs
2350 a57e502a Thomas Thrainer
      # using the assumption that logical_id == unique_id on that node
2351 763ad5be Thomas Thrainer
2352 763ad5be Thomas Thrainer
      # FIXME(iustin): use a better name for the replaced LVs
2353 763ad5be Thomas Thrainer
      temp_suffix = int(time.time())
2354 a57e502a Thomas Thrainer
      ren_fn = lambda d, suff: (d.logical_id[0],
2355 a57e502a Thomas Thrainer
                                d.logical_id[1] + "_replaced-%s" % suff)
2356 763ad5be Thomas Thrainer
2357 763ad5be Thomas Thrainer
      # Build the rename list based on what LVs exist on the node
2358 763ad5be Thomas Thrainer
      rename_old_to_new = []
2359 763ad5be Thomas Thrainer
      for to_ren in old_lvs:
2360 0c3d9c7c Thomas Thrainer
        result = self.rpc.call_blockdev_find(self.target_node_uuid,
2361 0c3d9c7c Thomas Thrainer
                                             (to_ren, self.instance))
2362 763ad5be Thomas Thrainer
        if not result.fail_msg and result.payload:
2363 763ad5be Thomas Thrainer
          # device exists
2364 763ad5be Thomas Thrainer
          rename_old_to_new.append((to_ren, ren_fn(to_ren, temp_suffix)))
2365 763ad5be Thomas Thrainer
2366 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the old LVs on the target node")
2367 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2368 763ad5be Thomas Thrainer
                                             rename_old_to_new)
2369 1c3231aa Thomas Thrainer
      result.Raise("Can't rename old LVs on node %s" %
2370 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2371 763ad5be Thomas Thrainer
2372 763ad5be Thomas Thrainer
      # Now we rename the new LVs to the old LVs
2373 763ad5be Thomas Thrainer
      self.lu.LogInfo("Renaming the new LVs on the target node")
2374 a57e502a Thomas Thrainer
      rename_new_to_old = [(new, old.logical_id)
2375 763ad5be Thomas Thrainer
                           for old, new in zip(old_lvs, new_lvs)]
2376 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_rename(self.target_node_uuid,
2377 763ad5be Thomas Thrainer
                                             rename_new_to_old)
2378 1c3231aa Thomas Thrainer
      result.Raise("Can't rename new LVs on node %s" %
2379 1c3231aa Thomas Thrainer
                   self.cfg.GetNodeName(self.target_node_uuid))
2380 763ad5be Thomas Thrainer
2381 763ad5be Thomas Thrainer
      # Intermediate step of in-memory modifications: the new LVs take over
      # the old logical ids
2382 763ad5be Thomas Thrainer
      for old, new in zip(old_lvs, new_lvs):
2383 763ad5be Thomas Thrainer
        new.logical_id = old.logical_id
2384 763ad5be Thomas Thrainer
2385 763ad5be Thomas Thrainer
      # We need to modify old_lvs so that removal later removes the
2386 763ad5be Thomas Thrainer
      # right LVs, not the newly added ones; note that old_lvs is a
2387 763ad5be Thomas Thrainer
      # copy here
2388 763ad5be Thomas Thrainer
      for disk in old_lvs:
2389 763ad5be Thomas Thrainer
        disk.logical_id = ren_fn(disk, temp_suffix)
2390 763ad5be Thomas Thrainer
2391 763ad5be Thomas Thrainer
      # Now that the new lvs have the old name, we can add them to the device
2392 1c3231aa Thomas Thrainer
      self.lu.LogInfo("Adding new mirror component on %s",
2393 1c3231aa Thomas Thrainer
                      self.cfg.GetNodeName(self.target_node_uuid))
2394 1c3231aa Thomas Thrainer
      result = self.rpc.call_blockdev_addchildren(self.target_node_uuid,
2395 0c3d9c7c Thomas Thrainer
                                                  (dev, self.instance),
2396 0c3d9c7c Thomas Thrainer
                                                  (new_lvs, self.instance))
2397 763ad5be Thomas Thrainer
      msg = result.fail_msg
2398 763ad5be Thomas Thrainer
      if msg:
2399 763ad5be Thomas Thrainer
        for new_lv in new_lvs:
2400 1c3231aa Thomas Thrainer
          msg2 = self.rpc.call_blockdev_remove(self.target_node_uuid,
2401 0c3d9c7c Thomas Thrainer
                                               (new_lv, self.instance)).fail_msg
2402 763ad5be Thomas Thrainer
          if msg2:
2403 763ad5be Thomas Thrainer
            self.lu.LogWarning("Can't rollback device %s: %s", dev, msg2,
2404 763ad5be Thomas Thrainer
                               hint=("cleanup manually the unused logical"
2405 763ad5be Thomas Thrainer
                                     "volumes"))
2406 763ad5be Thomas Thrainer
        raise errors.OpExecError("Can't add local storage to drbd: %s" % msg)
2407 763ad5be Thomas Thrainer
2408 763ad5be Thomas Thrainer
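    # The numbering of the two remaining steps depends on early_release, so
    # use a counter instead of hard-coded step numbers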
    cstep = itertools.count(5)
2409 763ad5be Thomas Thrainer
2410 763ad5be Thomas Thrainer
    if self.early_release:
2411 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2412 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2413 763ad5be Thomas Thrainer
      # TODO: Check if releasing locks early still makes sense
2414 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
2415 763ad5be Thomas Thrainer
    else:
2416 763ad5be Thomas Thrainer
      # Release all resource locks except those used by the instance
2417 5eacbcae Thomas Thrainer
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
2418 5eacbcae Thomas Thrainer
                   keep=self.node_secondary_ip.keys())
2419 763ad5be Thomas Thrainer
2420 763ad5be Thomas Thrainer
    # Release all node locks while waiting for sync
2421 5eacbcae Thomas Thrainer
    ReleaseLocks(self.lu, locking.LEVEL_NODE)
2422 763ad5be Thomas Thrainer
2423 763ad5be Thomas Thrainer
    # TODO: Can the instance lock be downgraded here? Take the optional disk
2424 763ad5be Thomas Thrainer
    # shutdown in the caller into consideration.
2425 763ad5be Thomas Thrainer
2426 763ad5be Thomas Thrainer
    # Wait for sync
2427 763ad5be Thomas Thrainer
    # This can fail as the old devices are degraded and _WaitForSync
2428 763ad5be Thomas Thrainer
    # does a combined result over all disks, so we don't check its return value
2429 763ad5be Thomas Thrainer
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
2430 5eacbcae Thomas Thrainer
    WaitForSync(self.lu, self.instance)
2431 763ad5be Thomas Thrainer
2432 763ad5be Thomas Thrainer
    # Check all devices manually
2433 763ad5be Thomas Thrainer
    self._CheckDevices(self.instance.primary_node, iv_names)
2434 763ad5be Thomas Thrainer
2435 763ad5be Thomas Thrainer
    # Step: remove old storage
2436 763ad5be Thomas Thrainer
    if not self.early_release:
2437 763ad5be Thomas Thrainer
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
2438 1c3231aa Thomas Thrainer
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
2439 763ad5be Thomas Thrainer
2440 763ad5be Thomas Thrainer
  def _ExecDrbd8Secondary(self, feedback_fn):
2441 763ad5be Thomas Thrainer
    """Replace the secondary node for DRBD 8.
2442 763ad5be Thomas Thrainer

2443 763ad5be Thomas Thrainer
    The algorithm for replace is quite complicated:
2444 763ad5be Thomas Thrainer
      - for all disks of the instance:
2445 763ad5be Thomas Thrainer
        - create new LVs on the new node with same names
2446 763ad5be Thomas Thrainer
        - shutdown the drbd device on the old secondary
2447 763ad5be Thomas Thrainer
        - disconnect the drbd network on the primary
2448 763ad5be Thomas Thrainer
        - create the drbd device on the new secondary
2449 763ad5be Thomas Thrainer
        - network attach the drbd on the primary, using an artifice:
2450 763ad5be Thomas Thrainer
          the drbd code for Attach() will connect to the network if it
2451 763ad5be Thomas Thrainer
          finds a device which is connected to the good local disks but
2452 763ad5be Thomas Thrainer
          not network enabled
2453 763ad5be Thomas Thrainer
      - wait for sync across all devices
2454 763ad5be Thomas Thrainer
      - remove all disks from the old secondary
2455 763ad5be Thomas Thrainer

2456 763ad5be Thomas Thrainer
    Failures are not very well handled.
2457 763ad5be Thomas Thrainer

2458 763ad5be Thomas Thrainer
    """
2459 763ad5be Thomas Thrainer
    steps_total = 6
2460 763ad5be Thomas Thrainer
2461 763ad5be Thomas Thrainer
    pnode = self.instance.primary_node
2462 763ad5be Thomas Thrainer
2463 763ad5be Thomas Thrainer
    # Step: check device activation
2464 763ad5be Thomas Thrainer
    self.lu.LogStep(1, steps_total, "Check device existence")
2465 763ad5be Thomas Thrainer
    self._CheckDisksExistence([self.instance.primary_node])
2466 763ad5be Thomas Thrainer
    self._CheckVolumeGroup([self.instance.primary_node])
2467 763ad5be Thomas Thrainer
2468 763ad5be Thomas Thrainer
    # Step: check other node consistency
2469 763ad5be Thomas Thrainer
    self.lu.LogStep(2, steps_total, "Check peer consistency")
2470 763ad5be Thomas Thrainer
    self._CheckDisksConsistency(self.instance.primary_node, True, True)
2471 763ad5be Thomas Thrainer
2472 763ad5be Thomas Thrainer
    # Step: create new storage
2473 763ad5be Thomas Thrainer
    self.lu.LogStep(3, steps_total, "Allocate new storage")
2474 5eacbcae Thomas Thrainer
    disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
2475 1c3231aa Thomas Thrainer
    excl_stor = IsExclusiveStorageEnabledNodeUuid(self.lu.cfg,
2476 1c3231aa Thomas Thrainer
                                                  self.new_node_uuid)
2477 763ad5be Thomas Thrainer
    for idx, dev in enumerate(disks):
2478 763ad5be Thomas Thrainer
      self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
2479 1c3231aa Thomas Thrainer
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
2480 763ad5be Thomas Thrainer
      # we pass force_create=True to force LVM creation
2481 763ad5be Thomas Thrainer
      for new_lv in dev.children:
2482 f2b58d93 Thomas Thrainer
        try:
2483 dad226e3 Thomas Thrainer
          _CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
2484 dad226e3 Thomas Thrainer
                               new_lv, True, GetInstanceInfoText(self.instance),
2485 dad226e3 Thomas Thrainer
                               False, excl_stor)
2486 f2b58d93 Thomas Thrainer
        except errors.DeviceCreationError, e:
2487 f2b58d93 Thomas Thrainer
          raise errors.OpExecError("Can't create block device: %s" % e.message)
2488 763ad5be Thomas Thrainer
2489 763ad5be Thomas Thrainer
    # Step 4: drbd minors and drbd setup changes
2490 763ad5be Thomas Thrainer
    # after this, we must manually remove the drbd minors on both the
2491 763ad5be Thomas Thrainer
    # error and the success paths
2492 763ad5be Thomas Thrainer
    self.lu.LogStep(4, steps_total, "Changing drbd configuration")
2493 1c3231aa Thomas Thrainer
    minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
2494 1c3231aa Thomas Thrainer
                                         for _ in self.instance.disks],
2495 da4a52a3 Thomas Thrainer
                                        self.instance.uuid)
2496 763ad5be Thomas Thrainer
    logging.debug("Allocated minors %r", minors)
2497 763ad5be Thomas Thrainer
2498 763ad5be Thomas Thrainer
    iv_names = {}
2499 763ad5be Thomas Thrainer
    for idx, (dev, new_minor) in enumerate(zip(self.instance.disks, minors)):
2500 763ad5be Thomas Thrainer
      self.lu.LogInfo("activating a new drbd on %s for disk/%d" %
2501 1c3231aa Thomas Thrainer
                      (self.cfg.GetNodeName(self.new_node_uuid), idx))
2502 763ad5be Thomas Thrainer
      # create new devices on new_node; note that we create two IDs:
2503 763ad5be Thomas Thrainer
      # one without port, so the drbd will be activated without
2504 763ad5be Thomas Thrainer
      # networking information on the new node at this stage, and one
2505 763ad5be Thomas Thrainer
      # with network, for the later activation in step 4
2506 763ad5be Thomas Thrainer
      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
2507 763ad5be Thomas Thrainer
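      # The primary node keeps its existing minor; only the new secondary
      # gets the freshly allocated one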
      if self.instance.primary_node == o_node1:
2508 763ad5be Thomas Thrainer
        p_minor = o_minor1
2509 763ad5be Thomas Thrainer
      else:
2510 763ad5be Thomas Thrainer
        assert self.instance.primary_node == o_node2, "Three-node instance?"
2511 763ad5be Thomas Thrainer
        p_minor = o_minor2
2512 763ad5be Thomas Thrainer
2513 1c3231aa Thomas Thrainer
      new_alone_id = (self.instance.primary_node, self.new_node_uuid, None,
2514 763ad5be Thomas Thrainer
                      p_minor, new_minor, o_secret)
2515 1c3231aa Thomas Thrainer
      new_net_id = (self.instance.primary_node, self.new_node_uuid, o_port,
2516 763ad5be Thomas Thrainer
                    p_minor, new_minor, o_secret)
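
      # Illustration with hypothetical values: if dev.logical_id is
      #   (pnode_uuid, old_snode_uuid, 11000, 0, 1, "secret")
      # and new_minor is 3, the two IDs become
      #   new_alone_id == (pnode_uuid, new_node_uuid, None, 0, 3, "secret")
      #   new_net_id   == (pnode_uuid, new_node_uuid, 11000, 0, 3, "secret")
      # i.e. only the port differs, which keeps the new drbd standalone until
      # it is reconnected below.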

      iv_names[idx] = (dev, dev.children, new_net_id)
      logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
                    new_net_id)
      new_drbd = objects.Disk(dev_type=constants.DT_DRBD8,
                              logical_id=new_alone_id,
                              children=dev.children,
                              size=dev.size,
                              params={})
      (anno_new_drbd,) = AnnotateDiskParams(self.instance, [new_drbd],
                                            self.cfg)
      try:
        CreateSingleBlockDev(self.lu, self.new_node_uuid, self.instance,
                             anno_new_drbd,
                             GetInstanceInfoText(self.instance), False,
                             excl_stor)
      except errors.GenericError:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
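
      # The device is created from new_alone_id, i.e. without networking
      # information, so the new node does not try to connect anywhere yet;
      # if creation fails, the minors reserved above are released before
      # re-raising.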

    # We have new devices, shutdown the drbd on the old secondary
    for idx, dev in enumerate(self.instance.disks):
      self.lu.LogInfo("Shutting down drbd for disk/%d on old node", idx)
      msg = self.rpc.call_blockdev_shutdown(self.target_node_uuid,
                                            (dev, self.instance)).fail_msg
      if msg:
        self.lu.LogWarning("Failed to shutdown drbd for disk/%d on old"
                           " node: %s" % (idx, msg),
                           hint=("Please cleanup this device manually as"
                                 " soon as possible"))

    self.lu.LogInfo("Detaching primary drbds from the network (=> standalone)")
    result = self.rpc.call_drbd_disconnect_net(
               [pnode], (self.instance.disks, self.instance))[pnode]

    msg = result.fail_msg
    if msg:
      # detaches didn't succeed (unlikely)
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise errors.OpExecError("Can't detach the disks from the network on"
                               " old node: %s" % (msg,))

    # if we managed to detach at least one, we update all the disks of
    # the instance to point to the new secondary
    self.lu.LogInfo("Updating instance configuration")
    for dev, _, new_logical_id in iv_names.itervalues():
      dev.logical_id = new_logical_id

    self.cfg.Update(self.instance, feedback_fn)
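
    # From here on the cluster configuration records the new secondary (the
    # new_net_id variants, port included), so a later failure leaves the
    # instance pointing at the new node rather than at the old one.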

    # Release all node locks (the configuration has been updated)
    ReleaseLocks(self.lu, locking.LEVEL_NODE)

    # and now perform the drbd attach
    self.lu.LogInfo("Attaching primary drbds to new secondary"
                    " (standalone => connected)")
    result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                            self.new_node_uuid],
                                           (self.instance.disks, self.instance),
                                           self.instance.name,
                                           False)
    for to_node, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        self.lu.LogWarning("Can't attach drbd disks on node %s: %s",
                           self.cfg.GetNodeName(to_node), msg,
                           hint=("please do a gnt-instance info to see the"
                                 " status of disks"))

    cstep = itertools.count(5)
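    # itertools.count(5) hands out the remaining step numbers (5, 6, ...), so
    # they stay correct whether "Removing old storage" happens now
    # (early_release) or only after the sync below.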

    if self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)
      # TODO: Check if releasing locks early still makes sense
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES)
    else:
      # Release all resource locks except those used by the instance
      ReleaseLocks(self.lu, locking.LEVEL_NODE_RES,
                   keep=self.node_secondary_ip.keys())
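
    # With early_release the old volumes are dropped and the resource locks
    # given up before the new mirror has synced; otherwise removal is deferred
    # to the final step, after the sync and the device checks below.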

    # TODO: Can the instance lock be downgraded here? Take the optional disk
    # shutdown in the caller into consideration.

    # Wait for sync
    # This can fail as the old devices are degraded and WaitForSync
    # returns a combined result over all disks, so we don't check its
    # return value
    self.lu.LogStep(cstep.next(), steps_total, "Sync devices")
    WaitForSync(self.lu, self.instance)

    # Check all devices manually
    self._CheckDevices(self.instance.primary_node, iv_names)

    # Step: remove old storage
    if not self.early_release:
      self.lu.LogStep(cstep.next(), steps_total, "Removing old storage")
      self._RemoveOldStorage(self.target_node_uuid, iv_names)