from ganeti.cmdlib.common import INSTANCE_DOWN, INSTANCE_NOT_RUNNING, \
AnnotateDiskParams, CheckIAllocatorOrNode, ExpandNodeUuidAndName, \
CheckNodeOnline, CheckInstanceNodeGroups, CheckInstanceState, \
- IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes
+ IsExclusiveStorageEnabledNode, FindFaultyInstanceDisks, GetWantedNodes, \
+ CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_utils import GetInstanceInfoText, \
CopyLockList, ReleaseLocks, CheckNodeVmCapable, \
BuildInstanceHookEnvByObject, CheckNodeNotDrained, CheckTargetNodeIPolicy
if disks is None:
disks = instance.disks
+ CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)
+
if instance.disk_template in constants.DTS_FILEBASED:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
result = lu.rpc.call_file_storage_dir_create(pnode_uuid, file_storage_dir)
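
# Illustrative note, not part of this patch: for file-based disks the
# logical_id is a (file_driver, file_path) pair, so logical_id[1] is the path
# of the disk file and its dirname is the per-instance storage directory.
# A hypothetical example:
#
#   disk.logical_id = (constants.FD_LOOP,
#                      "/srv/ganeti/file-storage/inst1/disk0")
#   os.path.dirname(disk.logical_id[1])   # -> ".../file-storage/inst1"
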
def GenerateDiskTemplate(
- lu, template_name, instance_name, primary_node_uuid, secondary_node_uuids,
+ lu, template_name, instance_uuid, primary_node_uuid, secondary_node_uuids,
disk_info, file_storage_dir, file_driver, base_index,
- feedback_fn, full_disk_params, _req_file_storage=opcodes.RequireFileStorage,
- _req_shr_file_storage=opcodes.RequireSharedFileStorage):
+ feedback_fn, full_disk_params):
"""Generate the entire disk layout for a given template type.
"""
disk_count = len(disk_info)
disks = []
+ CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), template_name)
+
if template_name == constants.DT_DISKLESS:
pass
elif template_name == constants.DT_DRBD8:
    if len(secondary_node_uuids) != 1:
      raise errors.ProgrammerError("Wrong template configuration")
remote_node_uuid = secondary_node_uuids[0]
minors = lu.cfg.AllocateDRBDMinor(
- [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_name)
+ [primary_node_uuid, remote_node_uuid] * len(disk_info), instance_uuid)
(drbd_params, _, _) = objects.Disk.ComputeLDParams(template_name,
full_disk_params)
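
# Illustrative note, not part of this patch: AllocateDRBDMinor receives the
# [primary, secondary] node pair repeated once per disk, so the returned
# minors pair up two-by-two per disk. A hypothetical two-disk instance:
#
#   minors = lu.cfg.AllocateDRBDMinor(
#       [primary_node_uuid, remote_node_uuid] * 2, instance_uuid)
#   # minors == [pri_minor_disk0, sec_minor_disk0,
#   #            pri_minor_disk1, sec_minor_disk1]
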
if secondary_node_uuids:
raise errors.ProgrammerError("Wrong template configuration")
- if template_name == constants.DT_FILE:
- _req_file_storage()
- elif template_name == constants.DT_SHARED_FILE:
- _req_shr_file_storage()
-
name_prefix = _DISK_TEMPLATE_NAME_PREFIX.get(template_name, None)
if name_prefix is None:
names = None
      # Lock the primary group used by the instance optimistically; this
      # requires going via the node before it's locked, requiring
      # verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
- self.cfg.GetInstanceNodeGroups(self.op.instance_name, primary_only=True)
+ self.cfg.GetInstanceNodeGroups(self.op.instance_uuid, primary_only=True)
elif level == locking.LEVEL_NODE:
# If an allocator is used, then we lock all the nodes in the current
This checks that the instance is in the cluster and is not running.
"""
- instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
if self.op.node_uuids:
if owned_groups:
# Node group locks are acquired only for the primary node (and only
# when the allocator is used)
- CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups,
+ CheckInstanceNodeGroups(self.cfg, instance.uuid, owned_groups,
primary_only=True)
# if we replace nodes *and* the old primary is offline, we don't
"""Recreate the disks.
"""
- instance = self.instance
-
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES))
to_skip = []
mods = [] # keeps track of needed changes
- for idx, disk in enumerate(instance.disks):
+ for idx, disk in enumerate(self.instance.disks):
try:
changes = self.disks[idx]
except KeyError:
# have changed
(_, _, old_port, _, _, old_secret) = disk.logical_id
new_minors = self.cfg.AllocateDRBDMinor(self.op.node_uuids,
- instance.name)
+ self.instance.uuid)
new_id = (self.op.node_uuids[0], self.op.node_uuids[1], old_port,
new_minors[0], new_minors[1], old_secret)
assert len(disk.logical_id) == len(new_id)
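
# Illustrative note, not part of this patch: a DRBD8 logical_id is the
# 6-tuple (node_a_uuid, node_b_uuid, port, minor_a, minor_b, secret); the
# recreation above swaps in the new node UUIDs and freshly allocated minors
# while keeping the old port and shared secret.
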
# now that we have passed all asserts above, we can apply the mods
# in a single run (to avoid partial changes)
for idx, new_id, changes in mods:
- disk = instance.disks[idx]
+ disk = self.instance.disks[idx]
if new_id is not None:
assert disk.dev_type == constants.LD_DRBD8
disk.logical_id = new_id
# change primary node, if needed
if self.op.node_uuids:
- instance.primary_node = self.op.node_uuids[0]
+ self.instance.primary_node = self.op.node_uuids[0]
self.LogWarning("Changing the instance's nodes, you will have to"
" remove any disks left on the older nodes manually")
if self.op.node_uuids:
- self.cfg.Update(instance, feedback_fn)
+ self.cfg.Update(self.instance, feedback_fn)
# All touched nodes must be locked
mylocks = self.owned_locks(locking.LEVEL_NODE)
- assert mylocks.issuperset(frozenset(instance.all_nodes))
- new_disks = CreateDisks(self, instance, to_skip=to_skip)
+ assert mylocks.issuperset(frozenset(self.instance.all_nodes))
+ new_disks = CreateDisks(self, self.instance, to_skip=to_skip)
# TODO: Release node locks before wiping, or explain why it's not possible
if self.cfg.GetClusterInfo().prealloc_wipe_disks:
wipedisks = [(idx, disk, 0)
- for (idx, disk) in enumerate(instance.disks)
+ for (idx, disk) in enumerate(self.instance.disks)
if idx not in to_skip]
- WipeOrCleanupDisks(self, instance, disks=wipedisks, cleanup=new_disks)
+ WipeOrCleanupDisks(self, self.instance, disks=wipedisks,
+ cleanup=new_disks)
+
+
+def _PerformNodeInfoCall(lu, node_uuids, vg):
+ """Prepares the input and performs a node info call.
+
+ @type lu: C{LogicalUnit}
+ @param lu: a logical unit from which we get configuration data
+ @type node_uuids: list of string
+ @param node_uuids: list of node UUIDs to perform the call for
+ @type vg: string
+ @param vg: the volume group's name
+
+ """
+ lvm_storage_units = [(constants.ST_LVM_VG, vg)]
+ storage_units = rpc.PrepareStorageUnitsForNodes(lu.cfg, lvm_storage_units,
+ node_uuids)
+ hvname = lu.cfg.GetHypervisorType()
+ hvparams = lu.cfg.GetClusterInfo().hvparams
+ nodeinfo = lu.rpc.call_node_info(node_uuids, storage_units,
+ [(hvname, hvparams[hvname])])
+ return nodeinfo
+
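# Illustrative usage, not part of this patch (it mirrors the refactored
# _CheckNodesFreeDiskOnVG below): the call returns the usual rpc result map
# keyed by node UUID, whose payload is the (bootid, space_info, hv_info)
# tuple consumed by _CheckVgCapacityForNode.
#
#   nodeinfo = _PerformNodeInfoCall(lu, node_uuids, "xenvg")
#   for uuid in node_uuids:
#     nodeinfo[uuid].Raise("Cannot get current information from node %s" %
#                          lu.cfg.GetNodeName(uuid), prereq=True,
#                          ecode=errors.ECODE_ENVIRON)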
+
+def _CheckVgCapacityForNode(node_name, node_info, vg, requested):
+ """Checks the vg capacity for a given node.
+
+  @type node_name: string
+  @param node_name: the name of the node
+  @type node_info: tuple (_, list of dicts, _)
+  @param node_info: the result of the node info call for one node
+ @type vg: string
+ @param vg: volume group name
+ @type requested: int
+ @param requested: the amount of disk in MiB to check for
+ @raise errors.OpPrereqError: if the node doesn't have enough disk,
+ or we cannot check the node
+
+ """
+ (_, space_info, _) = node_info
+ lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
+ space_info, constants.ST_LVM_VG)
+ if not lvm_vg_info:
+    raise errors.OpPrereqError("Can't retrieve storage information for LVM",
+                               errors.ECODE_ENVIRON)
+ vg_free = lvm_vg_info.get("storage_free", None)
+ if not isinstance(vg_free, int):
+ raise errors.OpPrereqError("Can't compute free disk space on node"
+ " %s for vg %s, result was '%s'" %
+ (node_name, vg, vg_free), errors.ECODE_ENVIRON)
+ if requested > vg_free:
+ raise errors.OpPrereqError("Not enough disk space on target node %s"
+ " vg %s: required %d MiB, available %d MiB" %
+ (node_name, vg, requested, vg_free),
+ errors.ECODE_NORES)
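
# Illustrative note, not part of this patch: space_info is a list of storage
# unit dicts and LookupSpaceInfoByStorageType picks the lvm-vg entry; a
# hypothetical payload and check could look like:
#
#   space_info = [{"type": constants.ST_LVM_VG, "name": "xenvg",
#                  "storage_size": 102400, "storage_free": 51200}]
#   _CheckVgCapacityForNode("node1.example.com", (None, space_info, None),
#                           "xenvg", 2048)   # passes: 2 GiB <= 50 GiB free
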
def _CheckNodesFreeDiskOnVG(lu, node_uuids, vg, requested):
or we cannot check the node
"""
- es_flags = rpc.GetExclusiveStorageForNodes(lu.cfg, node_uuids)
- # FIXME: This maps everything to storage type 'lvm-vg' to maintain
- # the current functionality. Refactor to make it more flexible.
- hvname = lu.cfg.GetHypervisorType()
- hvparams = lu.cfg.GetClusterInfo().hvparams
- nodeinfo = lu.rpc.call_node_info(node_uuids, [(constants.ST_LVM_VG, vg)],
- [(hvname, hvparams[hvname])], es_flags)
+ nodeinfo = _PerformNodeInfoCall(lu, node_uuids, vg)
for node in node_uuids:
node_name = lu.cfg.GetNodeName(node)
-
info = nodeinfo[node]
info.Raise("Cannot get current information from node %s" % node_name,
prereq=True, ecode=errors.ECODE_ENVIRON)
- (_, (vg_info, ), _) = info.payload
- vg_free = vg_info.get("vg_free", None)
- if not isinstance(vg_free, int):
- raise errors.OpPrereqError("Can't compute free disk space on node"
- " %s for vg %s, result was '%s'" %
- (node_name, vg, vg_free), errors.ECODE_ENVIRON)
- if requested > vg_free:
- raise errors.OpPrereqError("Not enough disk space on target node %s"
- " vg %s: required %d MiB, available %d MiB" %
- (node_name, vg, requested, vg_free),
- errors.ECODE_NORES)
+ _CheckVgCapacityForNode(node_name, info.payload, vg, requested)
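
# Illustrative usage, not part of this patch: CheckNodesFreeDiskPerVG (below)
# drives this helper once per volume group from a req_sizes dict, e.g.:
#
#   _CheckNodesFreeDiskOnVG(lu, node_uuids, "xenvg", 2048)   # 2 GiB in MiB
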
def CheckNodesFreeDiskPerVG(lu, node_uuids, req_sizes):
ignored.
"""
- lu.cfg.MarkInstanceDisksInactive(instance.name)
+ lu.cfg.MarkInstanceDisksInactive(instance.uuid)
all_result = True
disks = ExpandCheckDisks(instance, disks)
"""
device_info = []
disks_ok = True
- iname = instance.name
disks = ExpandCheckDisks(instance, disks)
# With the two passes mechanism we try to reduce the window of
# mark instance disks as active before doing actual work, so watcher does
# not try to shut them down erroneously
- lu.cfg.MarkInstanceDisksActive(iname)
+ lu.cfg.MarkInstanceDisksActive(instance.uuid)
# 1st pass, assemble on all nodes in secondary mode
for idx, inst_disk in enumerate(disks):
node_disk.UnsetSize()
lu.cfg.SetDiskID(node_disk, node_uuid)
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
- iname, False, idx)
+ instance.name, False, idx)
msg = result.fail_msg
if msg:
is_offline_secondary = (node_uuid in instance.secondary_nodes and
node_disk.UnsetSize()
lu.cfg.SetDiskID(node_disk, node_uuid)
result = lu.rpc.call_blockdev_assemble(node_uuid, (node_disk, instance),
- iname, True, idx)
+ instance.name, True, idx)
msg = result.fail_msg
if msg:
lu.LogWarning("Could not prepare block device %s on node %s"
lu.cfg.SetDiskID(disk, instance.primary_node)
if not disks_ok:
- lu.cfg.MarkInstanceDisksInactive(iname)
+ lu.cfg.MarkInstanceDisksInactive(instance.uuid)
return disks_ok, device_info
This checks that the instance is in the cluster.
"""
- instance = self.cfg.GetInstanceInfo(self.op.instance_name)
- assert instance is not None, \
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
+ assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- node_uuids = list(instance.all_nodes)
+ node_uuids = list(self.instance.all_nodes)
for node_uuid in node_uuids:
CheckNodeOnline(self, node_uuid)
+ self.node_es_flags = rpc.GetExclusiveStorageForNodes(self.cfg, node_uuids)
- self.instance = instance
-
- if instance.disk_template not in constants.DTS_GROWABLE:
+ if self.instance.disk_template not in constants.DTS_GROWABLE:
raise errors.OpPrereqError("Instance's disk layout does not support"
" growing", errors.ECODE_INVAL)
- self.disk = instance.FindDisk(self.op.disk)
+ self.disk = self.instance.FindDisk(self.op.disk)
if self.op.absolute:
self.target = self.op.amount
def _CheckDiskSpace(self, node_uuids, req_vgspace):
template = self.instance.disk_template
- if template not in (constants.DTS_NO_FREE_SPACE_CHECK):
+    if (template not in constants.DTS_NO_FREE_SPACE_CHECK and
+        not any(self.node_es_flags.values())):
# TODO: check the free disk space for file, when that feature will be
# supported
- nodes = map(self.cfg.GetNodeInfo, node_uuids)
- es_nodes = filter(lambda n: IsExclusiveStorageEnabledNode(self.cfg, n),
- nodes)
- if es_nodes:
- # With exclusive storage we need to something smarter than just looking
- # at free space; for now, let's simply abort the operation.
- raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
- " is enabled", errors.ECODE_STATE)
+      # With exclusive storage, anything smarter than merely looking at free
+      # space amounts to a dry run of the allocation, so we rely on the dry
+      # run performed in Exec() instead.
CheckNodesFreeDiskPerVG(self, node_uuids, req_vgspace)
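
# Illustrative note, not part of this patch: node_es_flags, filled in
# CheckPrereq, maps each node UUID to its exclusive_storage flag, e.g.
# {"uuid-a": False, "uuid-b": True}; if any node has it enabled, the static
# free-space check is skipped and only the per-node dry-run grow calls in
# Exec() validate the request.
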
def Exec(self, feedback_fn):
"""Execute disk grow.
"""
- instance = self.instance
- disk = self.disk
-
- assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+ assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES))
wipe_disks = self.cfg.GetClusterInfo().prealloc_wipe_disks
- disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[disk])
+ disks_ok, _ = AssembleInstanceDisks(self, self.instance, disks=[self.disk])
if not disks_ok:
raise errors.OpExecError("Cannot activate block device to grow")
feedback_fn("Growing disk %s of instance '%s' by %s to %s" %
- (self.op.disk, instance.name,
+ (self.op.disk, self.instance.name,
utils.FormatUnit(self.delta, "h"),
utils.FormatUnit(self.target, "h")))
# First run all grow ops in dry-run mode
- for node_uuid in instance.all_nodes:
- self.cfg.SetDiskID(disk, node_uuid)
- result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
- self.delta, True, True)
+ for node_uuid in self.instance.all_nodes:
+ self.cfg.SetDiskID(self.disk, node_uuid)
+ result = self.rpc.call_blockdev_grow(node_uuid,
+ (self.disk, self.instance),
+ self.delta, True, True,
+ self.node_es_flags[node_uuid])
result.Raise("Dry-run grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
if wipe_disks:
# Get disk size from primary node for wiping
- result = self.rpc.call_blockdev_getdimensions(instance.primary_node,
- [disk])
+ self.cfg.SetDiskID(self.disk, self.instance.primary_node)
+ result = self.rpc.call_blockdev_getdimensions(self.instance.primary_node,
+ [self.disk])
result.Raise("Failed to retrieve disk size from node '%s'" %
- instance.primary_node)
+ self.instance.primary_node)
(disk_dimensions, ) = result.payload
if disk_dimensions is None:
raise errors.OpExecError("Failed to retrieve disk size from primary"
- " node '%s'" % instance.primary_node)
+ " node '%s'" % self.instance.primary_node)
(disk_size_in_bytes, _) = disk_dimensions
old_disk_size = _DiskSizeInBytesToMebibytes(self, disk_size_in_bytes)
- assert old_disk_size >= disk.size, \
+ assert old_disk_size >= self.disk.size, \
("Retrieved disk size too small (got %s, should be at least %s)" %
- (old_disk_size, disk.size))
+ (old_disk_size, self.disk.size))
else:
old_disk_size = None
# We know that (as far as we can test) operations across different
# nodes will succeed, time to run it for real on the backing storage
- for node_uuid in instance.all_nodes:
- self.cfg.SetDiskID(disk, node_uuid)
- result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
- self.delta, False, True)
+ for node_uuid in self.instance.all_nodes:
+ self.cfg.SetDiskID(self.disk, node_uuid)
+ result = self.rpc.call_blockdev_grow(node_uuid,
+ (self.disk, self.instance),
+ self.delta, False, True,
+ self.node_es_flags[node_uuid])
result.Raise("Grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
# And now execute it for logical storage, on the primary node
- node_uuid = instance.primary_node
- self.cfg.SetDiskID(disk, node_uuid)
- result = self.rpc.call_blockdev_grow(node_uuid, (disk, instance),
- self.delta, False, False)
+ node_uuid = self.instance.primary_node
+ self.cfg.SetDiskID(self.disk, node_uuid)
+ result = self.rpc.call_blockdev_grow(node_uuid, (self.disk, self.instance),
+ self.delta, False, False,
+ self.node_es_flags[node_uuid])
result.Raise("Grow request failed to node %s" %
self.cfg.GetNodeName(node_uuid))
- disk.RecordGrow(self.delta)
- self.cfg.Update(instance, feedback_fn)
+ self.disk.RecordGrow(self.delta)
+ self.cfg.Update(self.instance, feedback_fn)
# Changes have been recorded, release node lock
ReleaseLocks(self, locking.LEVEL_NODE)
assert wipe_disks ^ (old_disk_size is None)
if wipe_disks:
- assert instance.disks[self.op.disk] == disk
+ assert self.instance.disks[self.op.disk] == self.disk
# Wipe newly added disk space
- WipeDisks(self, instance,
- disks=[(self.op.disk, disk, old_disk_size)])
+ WipeDisks(self, self.instance,
+ disks=[(self.op.disk, self.disk, old_disk_size)])
if self.op.wait_for_sync:
- disk_abort = not WaitForSync(self, instance, disks=[disk])
+ disk_abort = not WaitForSync(self, self.instance, disks=[self.disk])
if disk_abort:
self.LogWarning("Disk syncing has not returned a good status; check"
" the instance")
- if not instance.disks_active:
- _SafeShutdownInstanceDisks(self, instance, disks=[disk])
- elif not instance.disks_active:
+ if not self.instance.disks_active:
+ _SafeShutdownInstanceDisks(self, self.instance, disks=[self.disk])
+ elif not self.instance.disks_active:
self.LogWarning("Not shutting down the disk even if the instance is"
" not supposed to be running because no wait for"
" sync mode was requested")
assert self.owned_locks(locking.LEVEL_NODE_RES)
- assert set([instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
+ assert set([self.instance.name]) == self.owned_locks(locking.LEVEL_INSTANCE)
class LUInstanceReplaceDisks(LogicalUnit):
"""Check arguments.
"""
- remote_node = self.op.remote_node
- ialloc = self.op.iallocator
if self.op.mode == constants.REPLACE_DISK_CHG:
- if remote_node is None and ialloc is None:
+ if self.op.remote_node is None and self.op.iallocator is None:
raise errors.OpPrereqError("When changing the secondary either an"
" iallocator script must be used or the"
" new node given", errors.ECODE_INVAL)
else:
CheckIAllocatorOrNode(self, "iallocator", "remote_node")
- elif remote_node is not None or ialloc is not None:
+ elif self.op.remote_node is not None or self.op.iallocator is not None:
# Not replacing the secondary
raise errors.OpPrereqError("The iallocator and new node options can"
" only be used when changing the"
self.needed_locks[locking.LEVEL_NODE_RES] = []
- self.replacer = TLReplaceDisks(self, self.op.instance_name, self.op.mode,
+ self.replacer = TLReplaceDisks(self, self.op.instance_uuid,
+ self.op.instance_name, self.op.mode,
self.op.iallocator, self.op.remote_node_uuid,
self.op.disks, self.op.early_release,
self.op.ignore_ipolicy)
# Lock all groups used by instance optimistically; this requires going
# via the node before it's locked, requiring verification later on
self.needed_locks[locking.LEVEL_NODEGROUP] = \
- self.cfg.GetInstanceNodeGroups(self.op.instance_name)
+ self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
elif level == locking.LEVEL_NODE:
if self.op.iallocator is not None:
# Verify if node group locks are still correct
owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
if owned_groups:
- CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
+ CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, owned_groups)
return LogicalUnit.CheckPrereq(self)
This checks that the instance is in the cluster.
"""
- self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
CheckNodeOnline(self, self.instance.primary_node)
if self.op.wait_for_sync:
if not WaitForSync(self, self.instance):
- self.cfg.MarkInstanceDisksInactive(self.instance.name)
+ self.cfg.MarkInstanceDisksInactive(self.instance.uuid)
raise errors.OpExecError("Some disks of the instance are degraded!")
return disks_info
This checks that the instance is in the cluster.
"""
- self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
"""Deactivate the disks
"""
- instance = self.instance
if self.op.force:
- ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, self.instance)
else:
- _SafeShutdownInstanceDisks(self, instance)
+ _SafeShutdownInstanceDisks(self, self.instance)
def _CheckDiskConsistencyInner(lu, instance, dev, node_uuid, on_primary,
Note: Locking is not within the scope of this class.
"""
- def __init__(self, lu, instance_name, mode, iallocator_name, remote_node_uuid,
- disks, early_release, ignore_ipolicy):
+ def __init__(self, lu, instance_uuid, instance_name, mode, iallocator_name,
+ remote_node_uuid, disks, early_release, ignore_ipolicy):
"""Initializes this class.
"""
Tasklet.__init__(self, lu)
# Parameters
+ self.instance_uuid = instance_uuid
self.instance_name = instance_name
self.mode = mode
self.iallocator_name = iallocator_name
self.node_secondary_ip = None
@staticmethod
- def _RunAllocator(lu, iallocator_name, instance_name,
+ def _RunAllocator(lu, iallocator_name, instance_uuid,
relocate_from_node_uuids):
"""Compute a new secondary node using an IAllocator.
"""
req = iallocator.IAReqRelocate(
- name=instance_name,
+ inst_uuid=instance_uuid,
relocate_from_node_uuids=list(relocate_from_node_uuids))
ial = iallocator.IAllocator(lu.cfg, lu.rpc, req)
remote_node_name, errors.ECODE_NOENT)
lu.LogInfo("Selected new secondary for instance '%s': %s",
- instance_name, remote_node_name)
+ instance_uuid, remote_node_name)
return remote_node.uuid
This checks that the instance is in the cluster.
"""
- self.instance = instance = self.cfg.GetInstanceInfo(self.instance_name)
- assert instance is not None, \
+ self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
+ assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.instance_name
- if instance.disk_template != constants.DT_DRBD8:
+ if self.instance.disk_template != constants.DT_DRBD8:
raise errors.OpPrereqError("Can only run replace disks for DRBD8-based"
" instances", errors.ECODE_INVAL)
- if len(instance.secondary_nodes) != 1:
+ if len(self.instance.secondary_nodes) != 1:
raise errors.OpPrereqError("The instance has a strange layout,"
" expected one secondary but found %d" %
- len(instance.secondary_nodes),
+ len(self.instance.secondary_nodes),
errors.ECODE_FAULT)
- instance = self.instance
- secondary_node_uuid = instance.secondary_nodes[0]
+ secondary_node_uuid = self.instance.secondary_nodes[0]
if self.iallocator_name is None:
remote_node_uuid = self.remote_node_uuid
else:
remote_node_uuid = self._RunAllocator(self.lu, self.iallocator_name,
- instance.name,
- instance.secondary_nodes)
+ self.instance.uuid,
+ self.instance.secondary_nodes)
if remote_node_uuid is None:
self.remote_node_info = None
errors.ECODE_INVAL)
if self.mode == constants.REPLACE_DISK_AUTO:
- if not self._CheckDisksActivated(instance):
+ if not self._CheckDisksActivated(self.instance):
raise errors.OpPrereqError("Please run activate-disks on instance %s"
" first" % self.instance_name,
errors.ECODE_STATE)
- faulty_primary = self._FindFaultyDisks(instance.primary_node)
+ faulty_primary = self._FindFaultyDisks(self.instance.primary_node)
faulty_secondary = self._FindFaultyDisks(secondary_node_uuid)
if faulty_primary and faulty_secondary:
if faulty_primary:
self.disks = faulty_primary
- self.target_node_uuid = instance.primary_node
+ self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif faulty_secondary:
self.disks = faulty_secondary
self.target_node_uuid = secondary_node_uuid
- self.other_node_uuid = instance.primary_node
+ self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
else:
self.disks = []
else:
# Non-automatic modes
if self.mode == constants.REPLACE_DISK_PRI:
- self.target_node_uuid = instance.primary_node
+ self.target_node_uuid = self.instance.primary_node
self.other_node_uuid = secondary_node_uuid
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_SEC:
self.target_node_uuid = secondary_node_uuid
- self.other_node_uuid = instance.primary_node
+ self.other_node_uuid = self.instance.primary_node
check_nodes = [self.target_node_uuid, self.other_node_uuid]
elif self.mode == constants.REPLACE_DISK_CHG:
self.new_node_uuid = remote_node_uuid
- self.other_node_uuid = instance.primary_node
+ self.other_node_uuid = self.instance.primary_node
self.target_node_uuid = secondary_node_uuid
check_nodes = [self.new_node_uuid, self.other_node_uuid]
cluster = self.cfg.GetClusterInfo()
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
new_group_info)
- CheckTargetNodeIPolicy(self, ipolicy, instance, self.remote_node_info,
- self.cfg, ignore=self.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self, ipolicy, self.instance,
+ self.remote_node_info, self.cfg,
+ ignore=self.ignore_ipolicy)
for node_uuid in check_nodes:
CheckNodeOnline(self.lu, node_uuid)
# Check whether disks are valid
for disk_idx in self.disks:
- instance.FindDisk(disk_idx)
+ self.instance.FindDisk(disk_idx)
# Get secondary node IP addresses
self.node_secondary_ip = dict((uuid, node.secondary_ip) for (uuid, node)
# we pass force_create=True to force the LVM creation
for new_lv in new_lvs:
- _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
- GetInstanceInfoText(self.instance), False,
- excl_stor)
+ try:
+ _CreateBlockDevInner(self.lu, node_uuid, self.instance, new_lv, True,
+ GetInstanceInfoText(self.instance), False,
+ excl_stor)
+      except errors.DeviceCreationError as e:
+ raise errors.OpExecError("Can't create block device: %s" % e.message)
return iv_names
(self.cfg.GetNodeName(self.new_node_uuid), idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
- _CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance, new_lv,
- True, GetInstanceInfoText(self.instance), False,
- excl_stor)
+ try:
+ _CreateBlockDevInner(self.lu, self.new_node_uuid, self.instance,
+ new_lv, True, GetInstanceInfoText(self.instance),
+ False, excl_stor)
+        except errors.DeviceCreationError as e:
+ raise errors.OpExecError("Can't create block device: %s" % e.message)
    # Step 4: drbd minors and drbd setup changes
# after this, we must manually remove the drbd minors on both the
self.lu.LogStep(4, steps_total, "Changing drbd configuration")
minors = self.cfg.AllocateDRBDMinor([self.new_node_uuid
for _ in self.instance.disks],
- self.instance.name)
+ self.instance.uuid)
logging.debug("Allocated minors %r", minors)
iv_names = {}
GetInstanceInfoText(self.instance), False,
excl_stor)
except errors.GenericError:
- self.cfg.ReleaseDRBDMinors(self.instance.name)
+ self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise
# We have new devices, shutdown the drbd on the old secondary
msg = result.fail_msg
if msg:
# detaches didn't succeed (unlikely)
- self.cfg.ReleaseDRBDMinors(self.instance.name)
+ self.cfg.ReleaseDRBDMinors(self.instance.uuid)
raise errors.OpExecError("Can't detach the disks from the network on"
" old node: %s" % (msg,))