from ganeti import masterd
from ganeti import netutils
from ganeti import objects
-from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils
IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
- CheckDiskTemplateEnabled
+ CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
from ganeti.cmdlib.instance_storage import CreateDisks, \
CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
" is allowed to be passed",
errors.ECODE_INVAL)
- if vlan is not None and nic_mode != constants.NIC_MODE_OVS:
- raise errors.OpPrereqError("VLAN is given, but network mode is not"
- " openvswitch", errors.ECODE_INVAL)
-
# ip validity checks
if ip is None or ip.lower() == constants.VALUE_NONE:
nic_ip = None
"""
for nic in self.op.nics:
- if nic[constants.INIC_VLAN]:
- vlan = nic[constants.INIC_VLAN]
+ vlan = nic.get(constants.INIC_VLAN, None)
+ if vlan:
if vlan[0] == ".":
# vlan starting with dot means single untagged vlan,
# might be followed by trunk (:)
self._CheckVLANArguments()
self._CheckDiskArguments()
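+  # at this point a disk template must be set; presumably it was
+  # defaulted during the disk argument checks above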
+ assert self.op.disk_template is not None
# instance name verification
if self.op.name_check:
raise errors.OpPrereqError("Invalid file driver name '%s'" %
self.op.file_driver, errors.ECODE_INVAL)
+ # set default file_driver if unset and required
+ if (not self.op.file_driver and
+ self.op.disk_template in [constants.DT_FILE,
+ constants.DT_SHARED_FILE]):
+ self.op.file_driver = constants.FD_LOOP
+
### Node/iallocator related checks
CheckIAllocatorOrNode(self, "iallocator", "pnode")
_CheckOpportunisticLocking(self.op)
- self._cds = GetClusterDomainSecret()
-
if self.op.mode == constants.INSTANCE_IMPORT:
# On import force_variant must be True, because if we forced it at
# initial install, our only chance when importing it back is that it
raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
" installation" % self.op.os_type,
errors.ECODE_STATE)
- if self.op.disk_template is None:
- raise errors.OpPrereqError("No disk template specified",
- errors.ECODE_INVAL)
-
elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
+ self._cds = GetClusterDomainSecret()
+
# Check handshake to ensure both clusters have the same domain secret
src_handshake = self.op.source_handshake
if not src_handshake:
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
if not os.path.isabs(src_path):
- self.op.src_path = src_path = \
+ self.op.src_path = \
utils.PathJoin(pathutils.EXPORT_DIR, src_path)
self.needed_locks[locking.LEVEL_NODE_RES] = \
else:
node_name_whitelist = None
- #TODO Export network to iallocator so that it chooses a pnode
- # in a nodegroup that has the desired network connected to
req = _CreateInstanceAllocRequest(self.op, self.disks,
self.nics, self.be_full,
node_name_whitelist)
locked_nodes = self.owned_locks(locking.LEVEL_NODE)
exp_list = self.rpc.call_export_list(locked_nodes)
found = False
- for node in exp_list:
- if exp_list[node].fail_msg:
+ for node_uuid in exp_list:
+ if exp_list[node_uuid].fail_msg:
continue
- if self.op.src_path in exp_list[node].payload:
+ if self.op.src_path in exp_list[node_uuid].payload:
found = True
- self.op.src_node = node
- self.op.src_node_uuid = self.cfg.GetNodeInfoByName(node).uuid
+ self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
+ self.op.src_node_uuid = node_uuid
self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
self.op.src_path)
break
if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
ndict = {}
for name in list(constants.NICS_PARAMETERS) + ["ip", "mac"]:
- v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
- ndict[name] = v
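+          # copy only the NIC parameters actually present in the export file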
+ nic_param_name = "nic%d_%s" % (idx, name)
+ if einfo.has_option(constants.INISECT_INS, nic_param_name):
+ v = einfo.get(constants.INISECT_INS, nic_param_name)
+ ndict[name] = v
nics.append(ndict)
else:
break
netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
if netparams is None:
raise errors.OpPrereqError("No netparams found for network"
- " %s. Propably not connected to"
+ " %s. Probably not connected to"
" node's %s nodegroup" %
(nobj.name, self.pnode.name),
errors.ECODE_INVAL)
self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
else:
try:
- self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
+ self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
+ check=self.op.conflicts_check)
except errors.ReservationError:
raise errors.OpPrereqError("IP address %s already in use"
" or does not belong to network %s" %
elif self.op.disk_template == constants.DT_EXT:
# FIXME: Function that checks prereqs if needed
pass
- elif self.op.disk_template in utils.GetLvmDiskTemplates():
+ elif self.op.disk_template in constants.DTS_LVM:
# Check lv size requirements, if not adopting
req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
dsk[constants.IDISK_SIZE] = \
int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
+ # Check disk access param to be compatible with specified hypervisor
+ node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
+ node_group = self.cfg.GetNodeGroup(node_info.group)
+ disk_params = self.cfg.GetGroupDiskParams(node_group)
+ access_type = disk_params[self.op.disk_template].get(
+ constants.RBD_ACCESS, constants.DISK_KERNELSPACE
+ )
+
+ if not IsValidDiskAccessModeCombination(self.op.hypervisor,
+ self.op.disk_template,
+ access_type):
+ raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
+ " used with %s disk access param" %
+ (self.op.hypervisor, access_type),
+ errors.ECODE_STATE)
+
# Verify instance specs
spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
ispec = {
for t_dsk, a_dsk in zip(tmp_disks, self.disks):
rename_to.append(t_dsk.logical_id)
t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
- self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
result = self.rpc.call_blockdev_rename(self.pnode.uuid,
zip(tmp_disks, rename_to))
result.Raise("Failed to rename adoped LVs")
ReleaseLocks(self, locking.LEVEL_NODE_RES)
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
- # we need to set the disks ID to the primary node, since the
- # preceding code might or might have not done it, depending on
- # disk template and other options
- for disk in iobj.disks:
- self.cfg.SetDiskID(disk, self.pnode.uuid)
if self.op.mode == constants.INSTANCE_CREATE:
if not self.op.no_install:
pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
dt = masterd.instance.DiskTransfer("disk/%s" % idx,
constants.IEIO_FILE, (image, ),
constants.IEIO_SCRIPT,
- (iobj.disks[idx], idx),
+ ((iobj.disks[idx], iobj), idx),
None)
transfers.append(dt)
info = GetInstanceInfoText(renamed_inst)
for (idx, disk) in enumerate(renamed_inst.disks):
for node_uuid in renamed_inst.all_nodes:
- self.cfg.SetDiskID(disk, node_uuid)
- result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
+ result = self.rpc.call_blockdev_setinfo(node_uuid,
+ (disk, renamed_inst), info)
result.Warn("Error setting info on node %s for disk %s" %
(self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
try:
(self.op.target_node_uuid, self.op.target_node) = \
ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
self.op.target_node)
- self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node]
+ self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
self.needed_locks[locking.LEVEL_NODE_RES] = []
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
bep = self.cfg.GetClusterInfo().FillBE(self.instance)
for idx, dsk in enumerate(self.instance.disks):
- if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
+ if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
+ constants.DT_SHARED_FILE):
raise errors.OpPrereqError("Instance disk %d has a complex layout,"
" cannot copy" % idx, errors.ECODE_STATE)
idx, result.fail_msg)
errs.append(result.fail_msg)
break
- dev_path = result.payload
+ dev_path, _ = result.payload
result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
self.instance),
- target_node.name, dev_path,
- cluster_name)
+ target_node.secondary_ip,
+ dev_path, cluster_name)
if result.fail_msg:
self.LogWarning("Can't copy data over for disk %d: %s",
idx, result.fail_msg)
" pnode/snode while others do not",
errors.ECODE_INVAL)
- if self.op.iallocator is None:
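+    # fall back to the cluster default iallocator only when no nodes
+    # were specified explicitly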
+ if not has_nodes and self.op.iallocator is None:
default_iallocator = self.cfg.GetDefaultIAllocator()
- if default_iallocator and has_nodes:
+ if default_iallocator:
self.op.iallocator = default_iallocator
else:
raise errors.OpPrereqError("No iallocator or nodes on the instances"
for inst in self.op.instances:
(inst.pnode_uuid, inst.pnode) = \
ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
- nodeslist.append(inst.pnode)
+ nodeslist.append(inst.pnode_uuid)
if inst.snode is not None:
(inst.snode_uuid, inst.snode) = \
ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
- nodeslist.append(inst.snode)
+ nodeslist.append(inst.snode_uuid)
self.needed_locks[locking.LEVEL_NODE] = nodeslist
# Lock resources of instance's primary and secondary nodes (copy to
"""Check prerequisite.
"""
- cluster = self.cfg.GetClusterInfo()
- default_vg = self.cfg.GetVGName()
- ec_id = self.proc.GetECId()
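+    # an allocation request is only needed if an iallocator was given;
+    # otherwise the nodes were specified explicitly per instance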
+ if self.op.iallocator:
+ cluster = self.cfg.GetClusterInfo()
+ default_vg = self.cfg.GetVGName()
+ ec_id = self.proc.GetECId()
- if self.op.opportunistic_locking:
- # Only consider nodes for which a lock is held
- node_whitelist = self.cfg.GetNodeNames(
- list(self.owned_locks(locking.LEVEL_NODE)))
- else:
- node_whitelist = None
+ if self.op.opportunistic_locking:
+ # Only consider nodes for which a lock is held
+ node_whitelist = self.cfg.GetNodeNames(
+ list(self.owned_locks(locking.LEVEL_NODE)))
+ else:
+ node_whitelist = None
- insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
- _ComputeNics(op, cluster, None,
- self.cfg, ec_id),
- _ComputeFullBeParams(op, cluster),
- node_whitelist)
- for op in self.op.instances]
+ insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+ _ComputeNics(op, cluster, None,
+ self.cfg, ec_id),
+ _ComputeFullBeParams(op, cluster),
+ node_whitelist)
+ for op in self.op.instances]
- req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
- ial = iallocator.IAllocator(self.cfg, self.rpc, req)
+ req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
+ ial = iallocator.IAllocator(self.cfg, self.rpc, req)
- ial.Run(self.op.iallocator)
+ ial.Run(self.op.iallocator)
- if not ial.success:
- raise errors.OpPrereqError("Can't compute nodes using"
- " iallocator '%s': %s" %
- (self.op.iallocator, ial.info),
- errors.ECODE_NORES)
+ if not ial.success:
+ raise errors.OpPrereqError("Can't compute nodes using"
+ " iallocator '%s': %s" %
+ (self.op.iallocator, ial.info),
+ errors.ECODE_NORES)
- self.ia_result = ial.result
+ self.ia_result = ial.result
if self.op.dry_run:
self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
"""Contructs the partial result.
"""
- (allocatable, failed) = self.ia_result
+ if self.op.iallocator:
+ (allocatable, failed_insts) = self.ia_result
+ allocatable_insts = map(compat.fst, allocatable)
+ else:
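+      # without an iallocator, all requested instances count as allocatable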
+ allocatable_insts = [op.instance_name for op in self.op.instances]
+ failed_insts = []
+
return {
- opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
- map(compat.fst, allocatable),
- opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
+ constants.ALLOCATABLE_KEY: allocatable_insts,
+ constants.FAILED_KEY: failed_insts,
}
def Exec(self, feedback_fn):
"""Executes the opcode.
"""
- op2inst = dict((op.instance_name, op) for op in self.op.instances)
- (allocatable, failed) = self.ia_result
-
jobs = []
- for (name, node_names) in allocatable:
- op = op2inst.pop(name)
+ if self.op.iallocator:
+ op2inst = dict((op.instance_name, op) for op in self.op.instances)
+ (allocatable, failed) = self.ia_result
+
+ for (name, node_names) in allocatable:
+ op = op2inst.pop(name)
- (op.pnode_uuid, op.pnode) = \
- ExpandNodeUuidAndName(self.cfg, None, node_names[0])
- if len(node_names) > 1:
- (op.snode_uuid, op.snode) = \
- ExpandNodeUuidAndName(self.cfg, None, node_names[1])
+ (op.pnode_uuid, op.pnode) = \
+ ExpandNodeUuidAndName(self.cfg, None, node_names[0])
+ if len(node_names) > 1:
+ (op.snode_uuid, op.snode) = \
+ ExpandNodeUuidAndName(self.cfg, None, node_names[1])
- jobs.append([op])
+ jobs.append([op])
- missing = set(op2inst.keys()) - set(failed)
- assert not missing, \
- "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
+ missing = set(op2inst.keys()) - set(failed)
+ assert not missing, \
+ "Iallocator did return incomplete result: %s" % \
+ utils.CommaJoin(missing)
+ else:
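+      # no iallocator was used: submit each instance creation unchanged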
+ jobs.extend([op] for op in self.op.instances)
return ResultWithJobs(jobs, **self._ConstructPartialResult())
def _ApplyContainerMods(kind, container, chgdesc, mods,
- create_fn, modify_fn, remove_fn):
+ create_fn, modify_fn, remove_fn,
+ post_add_fn=None):
"""Applies descriptions in C{mods} to C{container}.
@type kind: string
@type remove_fn: callable
@param remove_fn: Callback on removing item; receives absolute item index,
item and private data object as added by L{_PrepareContainerMods}
+ @type post_add_fn: callable
+ @param post_add_fn: Callable for post-processing a newly created item after
+ it has been put into the container. It receives the index of the new item
+ and the new item as parameters.
"""
for (op, identifier, params, private) in mods:
assert idx <= len(container)
# list.insert does so before the specified index
container.insert(idx, item)
+
+ if post_add_fn is not None:
+ post_add_fn(addidx, item)
+
else:
# Retrieve existing item
(absidx, item) = GetItemFromContainer(identifier, kind, container)
if op == constants.DDM_REMOVE:
assert not params
- if remove_fn is not None:
- remove_fn(absidx, item, private)
-
changes = [("%s/%s" % (kind, absidx), "remove")]
+ if remove_fn is not None:
+ msg = remove_fn(absidx, item, private)
+ if msg:
+ changes.append(("%s/%s" % (kind, absidx), msg))
+
assert container[absidx] == item
del container[absidx]
elif op == constants.DDM_MODIFY:
if size is None:
raise errors.OpPrereqError("Required disk parameter '%s' missing" %
constants.IDISK_SIZE, errors.ECODE_INVAL)
-
- try:
- size = int(size)
- except (TypeError, ValueError), err:
- raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
- errors.ECODE_INVAL)
+ size = int(size)
params[constants.IDISK_SIZE] = size
name = params.get(constants.IDISK_NAME, None)
def CheckArguments(self):
if not (self.op.nics or self.op.disks or self.op.disk_template or
self.op.hvparams or self.op.beparams or self.op.os_name or
- self.op.offline is not None or self.op.runtime_mem or
- self.op.pnode):
+ self.op.osparams or self.op.offline is not None or
+ self.op.runtime_mem or self.op.pnode):
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
"hypervisor", "instance", "cluster")
self.op.disks = self._UpgradeDiskNicMods(
- "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
+ "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
self.op.nics = self._UpgradeDiskNicMods(
- "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
+ "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
if self.op.disks and self.op.disk_template is not None:
raise errors.OpPrereqError("Disk template conversion and other disk"
# Reserve new IP if in the new network if any
elif new_net_uuid:
try:
- self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
+ self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
+ check=self.op.conflicts_check)
self.LogInfo("Reserving IP %s in network %s",
new_ip, new_net_obj.name)
except errors.ReservationError:
self.instance.disk_template,
errors.ECODE_INVAL)
- if not self.cluster.IsDiskTemplateEnabled(self.instance.disk_template):
+ if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
- " cluster." % self.instance.disk_template)
+ " cluster." % self.op.disk_template)
if (self.instance.disk_template,
self.op.disk_template) not in self._DISK_CONVERSIONS:
constants.DT_EXT),
errors.ECODE_INVAL)
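+    # a freshly added disk must be synced before it can be used, so disk
+    # additions are refused when --no-wait-for-sync is given while the
+    # instance's disks are active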
+ if not self.op.wait_for_sync and self.instance.disks_active:
+ for mod in self.diskmod:
+ if mod[0] == constants.DDM_ADD:
+ raise errors.OpPrereqError("Can't add a disk to an instance with"
+ " activated disks and"
+ " --no-wait-for-sync given.",
+ errors.ECODE_INVAL)
+
if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Disk operations not supported for"
" diskless instances", errors.ECODE_INVAL)
assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
self.cluster = self.cfg.GetClusterInfo()
+ cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
# verify that the instance is not up
instance_info = self.rpc.call_instance_info(
pnode_uuid, self.instance.name, self.instance.hypervisor,
- self.instance.hvparams)
+ cluster_hvparams)
if instance_info.fail_msg:
self.warn.append("Can't get instance runtime information: %s" %
instance_info.fail_msg)
# dictionary with instance information after the modification
ispec = {}
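+    # check whether the primary node supports hotplug; with
+    # --hotplug-if-possible we fall back to a cold modification,
+    # otherwise this is a hard error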
+ if self.op.hotplug or self.op.hotplug_if_possible:
+ result = self.rpc.call_hotplug_supported(self.instance.primary_node,
+ self.instance)
+ if result.fail_msg:
+ if self.op.hotplug:
+ result.Raise("Hotplug is not possible: %s" % result.fail_msg,
+ prereq=True)
+ else:
+ self.LogWarning(result.fail_msg)
+ self.op.hotplug = False
+ self.LogInfo("Modification will take place without hotplugging.")
+ else:
+ self.op.hotplug = True
+
# Prepare NIC modifications
self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
mem_check_list.extend(self.instance.secondary_nodes)
instance_info = self.rpc.call_instance_info(
pnode_uuid, self.instance.name, self.instance.hypervisor,
- self.instance.hvparams)
+ cluster_hvparams)
hvspecs = [(self.instance.hypervisor,
- self.cluster.hvparams[self.instance.hypervisor])]
+ cluster_hvparams)]
nodeinfo = self.rpc.call_node_info(mem_check_list, None,
hvspecs)
pninfo = nodeinfo[pnode_uuid]
remote_info = self.rpc.call_instance_info(
self.instance.primary_node, self.instance.name,
self.instance.hypervisor,
- self.cluster.hvparams[self.instance.hypervisor])
+ cluster_hvparams)
remote_info.Raise("Error checking node %s" %
self.cfg.GetNodeName(self.instance.primary_node))
if not remote_info.payload: # not running already
# Operate on copies as this is still in prereq
nics = [nic.Copy() for nic in self.instance.nics]
_ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
- self._CreateNewNic, self._ApplyNicMods, None)
+ self._CreateNewNic, self._ApplyNicMods,
+ self._RemoveNic)
# Verify that NIC names are unique and valid
utils.ValidateDeviceNames("NIC", nics)
self._new_nics = nics
self.instance.uuid, pnode_uuid,
[snode_uuid], disk_info, None, None, 0,
feedback_fn, self.diskparams)
- anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
- self.diskparams)
+ anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
info = GetInstanceInfoText(self.instance)
except errors.GenericError, e:
feedback_fn("Initializing of DRBD devices failed;"
" renaming back original volumes...")
- for disk in new_disks:
- self.cfg.SetDiskID(disk, pnode_uuid)
rename_back_list = [(n.children[0], o.logical_id)
for (n, o) in zip(new_disks, self.instance.disks)]
result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
feedback_fn("Removing volumes on the secondary node...")
for disk in old_disks:
- self.cfg.SetDiskID(disk, snode_uuid)
- msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
- if msg:
- self.LogWarning("Could not remove block device %s on node %s,"
- " continuing anyway: %s", disk.iv_name,
- self.cfg.GetNodeName(snode_uuid), msg)
+ result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
+ result.Warn("Could not remove block device %s on node %s,"
+ " continuing anyway" %
+ (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
+ self.LogWarning)
feedback_fn("Removing unneeded volumes on the primary node...")
for idx, disk in enumerate(old_disks):
meta = disk.children[1]
- self.cfg.SetDiskID(meta, pnode_uuid)
- msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
- if msg:
- self.LogWarning("Could not remove metadata for disk %d on node %s,"
- " continuing anyway: %s", idx,
- self.cfg.GetNodeName(pnode_uuid), msg)
+ result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
+ result.Warn("Could not remove metadata for disk %d on node %s,"
+ " continuing anyway" %
+ (idx, self.cfg.GetNodeName(pnode_uuid)),
+ self.LogWarning)
+
+ def _HotplugDevice(self, action, dev_type, device, extra, seq):
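+    """Tries to hotplug a device on the instance's primary node.
+
+    Failures are only logged as warnings; a short "hotplug:done" or
+    "hotplug:failed" message is returned for the change description.
+
+    """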
+ self.LogInfo("Trying to hotplug device...")
+ msg = "hotplug:"
+ result = self.rpc.call_hotplug_device(self.instance.primary_node,
+ self.instance, action, dev_type,
+ (device, self.instance),
+ extra, seq)
+ if result.fail_msg:
+ self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
+ self.LogInfo("Continuing execution..")
+ msg += "failed"
+ else:
+ self.LogInfo("Hotplug done.")
+ msg += "done"
+ return msg
def _CreateNewDisk(self, idx, params, _):
"""Creates a new disk.
disks=[(idx, disk, 0)],
cleanup=new_disks)
- return (disk, [
- ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
- ])
+ changes = [
+ ("disk/%d" % idx,
+ "add:size=%s,mode=%s" % (disk.size, disk.mode)),
+ ]
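+    # if hotplug was requested, assemble the new disk on the primary
+    # node and plug it into the running instance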
+ if self.op.hotplug:
+ result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
+ (disk, self.instance),
+ self.instance.name, True, idx)
+ if result.fail_msg:
+ changes.append(("disk/%d" % idx, "assemble:failed"))
+ self.LogWarning("Can't assemble newly created disk %d: %s",
+ idx, result.fail_msg)
+ else:
+ _, link_name = result.payload
+ msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+ constants.HOTPLUG_TARGET_DISK,
+ disk, link_name, idx)
+ changes.append(("disk/%d" % idx, msg))
+
+ return (disk, changes)
+
+ def _PostAddDisk(self, _, disk):
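+    """Waits for a newly added disk to sync.
+
+    If the instance's disks are meant to be inactive, the disk is shut
+    down again once it has synced.
+
+    """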
+ if not WaitForSync(self, self.instance, disks=[disk],
+ oneshot=not self.op.wait_for_sync):
+ raise errors.OpExecError("Failed to sync disks of %s" %
+ self.instance.name)
+
+ # the disk is active at this point, so deactivate it if the instance disks
+ # are supposed to be inactive
+ if not self.instance.disks_active:
+ ShutdownInstanceDisks(self, self.instance, disks=[disk])
@staticmethod
def _ModifyDisk(idx, disk, params, _):
"""Removes a disk.
"""
+ hotmsg = ""
+ if self.op.hotplug:
+ hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
+ constants.HOTPLUG_TARGET_DISK,
+ root, None, idx)
+ ShutdownInstanceDisks(self, self.instance, [root])
+
(anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
for node_uuid, disk in anno_disk.ComputeNodeTree(
self.instance.primary_node):
- self.cfg.SetDiskID(disk, node_uuid)
- msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
+ msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
+ .fail_msg
if msg:
self.LogWarning("Could not remove disk/%d on node '%s': %s,"
" continuing anyway", idx,
self.cfg.GetNodeName(node_uuid), msg)
# if this is a DRBD disk, return its port to the pool
- if root.dev_type in constants.LDS_DRBD:
+ if root.dev_type in constants.DTS_DRBD:
self.cfg.AddTcpUdpPort(root.logical_id[2])
+ return hotmsg
+
def _CreateNewNic(self, idx, params, private):
"""Creates data structure for a new network interface.
nicparams=nicparams)
nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
- return (nobj, [
+ changes = [
("nic.%d" % idx,
"add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
(mac, ip, private.filled[constants.NIC_MODE],
- private.filled[constants.NIC_LINK],
- net)),
- ])
+ private.filled[constants.NIC_LINK], net)),
+ ]
+
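+    # hotplug the new NIC into the running instance, if requested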
+ if self.op.hotplug:
+ msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+ constants.HOTPLUG_TARGET_NIC,
+ nobj, None, idx)
+ changes.append(("nic.%d" % idx, msg))
+
+ return (nobj, changes)
def _ApplyNicMods(self, idx, nic, params, private):
"""Modifies a network interface.
for (key, val) in nic.nicparams.items():
changes.append(("nic.%s/%d" % (key, idx), val))
+ if self.op.hotplug:
+ msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
+ constants.HOTPLUG_TARGET_NIC,
+ nic, None, idx)
+ changes.append(("nic/%d" % idx, msg))
+
return changes
+ def _RemoveNic(self, idx, nic, _):
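+    """Hot-unplugs a NIC from the running instance, if hotplug is enabled.
+
+    """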
+ if self.op.hotplug:
+ return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
+ constants.HOTPLUG_TARGET_NIC,
+ nic, None, idx)
+
def Exec(self, feedback_fn):
"""Modifies an instance.
# Apply disk changes
_ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
self._CreateNewDisk, self._ModifyDisk,
- self._RemoveDisk)
+ self._RemoveDisk, post_add_fn=self._PostAddDisk)
_UpdateIvNames(0, self.instance.disks)
if self.op.disk_template: