from ganeti.masterd import iallocator
from ganeti import objects
from ganeti import utils
-from ganeti import opcodes
from ganeti import rpc
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, Tasklet
from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
- IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes
+ IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
+ CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
constants.DT_PLAIN: "",
constants.DT_RBD: ".rbd",
constants.DT_EXT: ".ext",
- }
-
-
-_DISK_TEMPLATE_DEVICE_TYPE = {
- constants.DT_PLAIN: constants.LD_LV,
- constants.DT_FILE: constants.LD_FILE,
- constants.DT_SHARED_FILE: constants.LD_FILE,
- constants.DT_BLOCK: constants.LD_BLOCKDEV,
- constants.DT_RBD: constants.LD_RBD,
- constants.DT_EXT: constants.LD_EXT,
+ constants.DT_FILE: ".file",
+ constants.DT_SHARED_FILE: ".sharedfile",
}
if disks is None:
disks = instance.disks
+ CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
+
if instance.disk_template in constants.DTS_FILEBASED:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
port = lu.cfg.AllocatePort()
shared_secret = lu.cfg.GenerateDRBDSecret(lu.proc.GetECId())
- dev_data = objects.Disk(dev_type=constants.LD_LV, size=size,
+ dev_data = objects.Disk(dev_type=constants.DT_PLAIN, size=size,
logical_id=(vgnames[0], names[0]),
params={})
dev_data.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
- dev_meta = objects.Disk(dev_type=constants.LD_LV,
+ dev_meta = objects.Disk(dev_type=constants.DT_PLAIN,
size=constants.DRBD_META_SIZE,
logical_id=(vgnames[1], names[1]),
params={})
dev_meta.uuid = lu.cfg.GenerateUniqueID(lu.proc.GetECId())
- drbd_dev = objects.Disk(dev_type=constants.LD_DRBD8, size=size,
+ drbd_dev = objects.Disk(dev_type=constants.DT_DRBD8, size=size,
logical_id=(primary_uuid, secondary_uuid, port,
p_minor, s_minor,
shared_secret),
def GenerateDiskTemplate(
lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
disk_info, file_storage_dir, file_driver, base_index,
- feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
- _req_shr_file_storage=opcodes.RequireSharedFileStorage):
+ feedback_fn, full_disk_params):
"""Generate the entire disk layout for a given template type.
"""
disk_count = len(disk_info)
disks = []
+ CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)
+
if template_name == constants.DT_DISKLESS:
pass
elif template_name == constants.DT_DRBD8:
if secondary_node_uuids:
raise errors.ProgrammerError("Wrong template configuration")
- if template_name == constants.DT_FILE:
- _req_file_storage()
- elif template_name == constants.DT_SHARED_FILE:
- _req_shr_file_storage()
-
name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
if name_prefix is None:
names = None
elif template_name in (constants.DT_FILE, constants.DT_SHARED_FILE):
logical_id_fn = \
lambda _, disk_index, disk: (file_driver,
- "%s/disk%d" % (file_storage_dir,
- disk_index))
+ "%s/%s" % (file_storage_dir,
+ names[idx]))
elif template_name == constants.DT_BLOCK:
logical_id_fn = \
lambda idx, disk_index, disk: (constants.BLOCKDEV_DRIVER_MANUAL,
else:
raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
- dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
+ dev_type = template_name
for idx, disk in enumerate(disk_info):
params = {}
continue
# update secondaries for disks, if needed
- if self.op.node_uuids and disk.dev_type == constants.LD_DRBD8:
+ if self.op.node_uuids and disk.dev_type == constants.DT_DRBD8:
# need to update the nodes and minors
assert len(self.op.node_uuids) == 2
assert len(disk.logical_id) == 6 # otherwise disk internals
for idx, new_id, changes in mods:
disk = self.instance.disks[idx]
if new_id is not None:
- assert disk.dev_type == constants.LD_DRBD8
+ assert disk.dev_type == constants.DT_DRBD8
disk.logical_id = new_id
if changes:
disk.Update(size=changes.get(constants.IDISK_SIZE, None),
cleanup=new_disks)
+def _PerformNodeInfoCall(lu, node_uuids, vg):
+  """Prepares the input and performs a node info call.
+
+  @type lu: C{LogicalUnit}
+  @param lu: a logical unit from which we get configuration data
+  @type node_uuids: list of string
+  @param node_uuids: list of node UUIDs to perform the call for
+  @type vg: string
+  @param vg: the volume group's name
+  @return: the result of C{lu.rpc.call_node_info} for the given nodes,
+    queried for the given volume group's LVM storage units and the
+    cluster's enabled hypervisor (with its cluster-level hvparams)
+
+  """
+  lvm_storage_units = [(constants.ST_LVM_VG, vg)]
+  storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
+                                                  node_uuids)
+  hvname = lu.cfg.GetHypervisorType()
+  hvparams = lu.cfg.GetClusterInfo().hvparams
+  nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
+                                   [(hvname, hvparams[hvname])])
+  return nodeinfo
+
+
+def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
+  """Checks the vg capacity for a given node.
+
+  @type node_name: string
+  @param node_name: the name of the node
+  @type node_info: tuple (_, list of dicts, _)
+  @param node_info: the result of the node info call for one node
+  @type vg: string
+  @param vg: volume group name
+  @type requested: int
+  @param requested: the amount of disk in MiB to check for
+  @raise errors.OpPrereqError: if the node doesn't have enough disk,
+      or we cannot check the node
+
+  """
+  (_, space_info, _) = node_info
+  lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
+      space_info, constants.ST_LVM_VG)
+  if not lvm_vg_info:
+    # Pass an error code like every other OpPrereqError raised here; the
+    # pre-existing capacity check used ECODE_ENVIRON for "cannot check".
+    raise errors.OpPrereqError("Can't retrieve storage information for LVM",
+                               errors.ECODE_ENVIRON)
+  vg_free = lvm_vg_info.get("storage_free", None)
+  if not isinstance(vg_free, int):
+    raise errors.OpPrereqError("Can't compute free disk space on node"
+                               " %s for vg %s, result was '%s'" %
+                               (node_name, vg, vg_free), errors.ECODE_ENVIRON)
+  if requested > vg_free:
+    raise errors.OpPrereqError("Not enough disk space on target node %s"
+                               " vg %s: required %d MiB, available %d MiB" %
+                               (node_name, vg, requested, vg_free),
+                               errors.ECODE_NORES)
+
+
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
"""Checks if nodes have enough free disk space in the specified VG.
or we cannot check the node
"""
- es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, node_uuids)
- hvname = lu.cfg.GetHypervisorType()
- hvparams = lu.cfg.GetClusterInfo().hvparams
- nodeinfo = lu.rpc.call_node_info(node_uuids, [(constants.ST_LVM_VG, vg)],
- [(hvname, hvparams[hvname])], es_flags)
+ nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
for node in node_uuids:
node_name = lu.cfg.GetNodeName(node)
-
info = nodeinfo[node]
info.Raise("Cannot get current information from node %s" % node_name,
prereq=True, ecode=errors.ECODE_ENVIRON)
- (_, (vg_info, ), _) = info.payload
- vg_free = vg_info.get("storage_free", None)
- if not isinstance(vg_free, int):
- raise errors.OpPrereqError("Can't compute free disk space on node"
- " %s for vg %s, result was '%s'" %
- (node_name, vg, vg_free), errors.ECODE_ENVIRON)
- if requested > vg_free:
- raise errors.OpPrereqError("Not enough disk space on target node %s"
- " vg %s: required %d MiB, available %d MiB" %
- (node_name, vg, requested, vg_free),
- errors.ECODE_NORES)
+ _CheckVgCapacityForNode(node_name, info.payload, vg, requested)
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
def _CheckDiskSpace(self, node_uuids, req_vgspace):
template = self.instance.disk_template
- if template not in (constants.DTS_NO_FREE_SPACE_CHECK):
+ if (template not in (constants.DTS_NO_FREE_SPACE_CHECK) and
+ not any(self.node_es_flags.values())):
# TODO: check the free disk space for file, when that feature will be
# supported
- if any(self.node_es_flags.values()):
- # With exclusive storage we need to something smarter than just looking
- # at free space; for now, let's simply abort the operation.
- raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
- " is enabled", errors.ECODE_STATE)
+ # With exclusive storage we need to do something smarter than just looking
+ # at free space, which, in the end, is basically a dry run. So we rely on
+ # the dry run performed in Exec() instead.
CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
def Exec(self, feedback_fn):
if msg or not result.payload:
if not msg:
msg = "disk not found"
- raise errors.OpExecError("Can't find disk/%d on node %s: %s" %
- (idx, self.cfg.GetNodeName(node_uuid), msg))
+ if not self._CheckDisksActivated(self.instance):
+ extra_hint = ("\nDisks seem to be not properly activated. Try"
+ " running activate-disks on the instance before"
+ " using replace-disks.")
+ else:
+ extra_hint = ""
+ raise errors.OpExecError("Can't find disk/%d on node %s: %s%s" %
+ (idx, self.cfg.GetNodeName(node_uuid), msg,
+ extra_hint))
def _CheckDisksConsistency(self, node_uuid, on_primary, ldisk):
for idx, dev in enumerate(self.instance.disks):
(data_disk, meta_disk) = dev.children
vg_data = data_disk.logical_id[0]
- lv_data = objects.Disk(dev_type=constants.LD_LV, size=dev.size,
+ lv_data = objects.Disk(dev_type=constants.DT_PLAIN, size=dev.size,
logical_id=(vg_data, names[0]),
params=data_disk.params)
vg_meta = meta_disk.logical_id[0]
- lv_meta = objects.Disk(dev_type=constants.LD_LV,
+ lv_meta = objects.Disk(dev_type=constants.DT_PLAIN,
size=constants.DRBD_META_SIZE,
logical_id=(vg_meta, names[1]),
params=meta_disk.params)
iv_names[idx] = (dev, dev.children, new_net_id)
logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
new_net_id)
- new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
+ new_drbd = objects.Disk(dev_type=constants.DT_DRBD8,
logical_id=new_alone_id,
children=dev.children,
size=dev.size,