import OpenSSL
import copy
-import itertools
import logging
-import operator
import os
from ganeti import compat
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
-from ganeti import qlang
from ganeti import rpc
from ganeti import utils
-from ganeti import query
-
-from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, _QueryBase, \
- ResultWithJobs
-
-from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
- INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, _CheckNodeOnline, \
- _ShareAll, _GetDefaultIAllocator, _CheckInstanceNodeGroups, \
- _LoadNodeEvacResult, _CheckIAllocatorOrNode, _CheckParamsNotGlobal, \
- _IsExclusiveStorageEnabledNode, _CheckHVParams, _CheckOSParams, \
- _GetWantedInstances, _CheckInstancesNodeGroups, _AnnotateDiskParams, \
- _GetUpdatedParams, _ExpandInstanceName, _ComputeIPolicySpecViolation, \
- _CheckInstanceState, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _CreateDisks, \
- _CheckNodesFreeDiskPerVG, _WipeDisks, _WaitForSync, \
- _IsExclusiveStorageEnabledNodeName, _CreateSingleBlockDev, _ComputeDisks, \
- _CheckRADOSFreeSpace, _ComputeDiskSizePerVG, _GenerateDiskTemplate, \
- _CreateBlockDev, _StartInstanceDisks, _ShutdownInstanceDisks, \
- _AssembleInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
- _GetClusterDomainSecret, _BuildInstanceHookEnv, _NICListToTuple, \
- _NICToTuple, _CheckNodeNotDrained, _RemoveInstance, _CopyLockList, \
- _ReleaseLocks, _CheckNodeVmCapable, _CheckTargetNodeIPolicy, \
- _GetInstanceInfoText, _RemoveDisks, _CheckNodeFreeMemory, \
- _CheckInstanceBridgesExist, _CheckNicsBridgesExist
+
+from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
+
+from ganeti.cmdlib.common import INSTANCE_DOWN, \
+ INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
+ ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
+ LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
+ IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
+ AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
+ ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
+from ganeti.cmdlib.instance_storage import CreateDisks, \
+ CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
+ IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
+ CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
+ StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+ GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
+ NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
+ ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
+ GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
+ CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
import ganeti.masterd.instance
-#: Type description for changes as returned by L{ApplyContainerMods}'s
+#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
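+#: (illustratively, a callback might return C{[("mode", "rw")]} -- a list of
+#: (name, new value) pairs)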
_TApplyContModsCbChanges = \
ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
def _ComputeIPolicyInstanceSpecViolation(
ipolicy, instance_spec, disk_template,
- _compute_fn=_ComputeIPolicySpecViolation):
+ _compute_fn=ComputeIPolicySpecViolation):
"""Compute if instance specs meets the specs of ipolicy.
@type ipolicy: dict
@type disk_template: string
@param disk_template: the disk template of the instance
@param _compute_fn: The function to verify ipolicy (unittest only)
- @see: L{_ComputeIPolicySpecViolation}
+ @see: L{ComputeIPolicySpecViolation}
"""
mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
-def _CheckNodeHasOS(lu, node, os_name, force_variant):
- """Ensure that a node supports a given OS.
-
- @param lu: the LU on behalf of which we make the check
- @param node: the node to check
- @param os_name: the OS to query about
- @param force_variant: whether to ignore variant errors
- @raise errors.OpPrereqError: if the node is not supporting the OS
-
- """
- result = lu.rpc.call_os_get(node, os_name)
- result.Raise("OS '%s' not in supported OS list for node %s" %
- (os_name, node),
- prereq=True, ecode=errors.ECODE_INVAL)
- if not force_variant:
- _CheckOSVariant(result.payload, os_name)
-
-
class LUInstanceCreate(LogicalUnit):
"""Create an instance.
raise errors.OpPrereqError("Invalid file driver name '%s'" %
self.op.file_driver, errors.ECODE_INVAL)
+ # set default file_driver if unset and required
+ if (not self.op.file_driver and
+ self.op.disk_template in [constants.DT_FILE,
+ constants.DT_SHARED_FILE]):
+ self.op.file_driver = constants.FD_DEFAULT
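+      # (constants.FD_DEFAULT is used for both DT_FILE and DT_SHARED_FILE;
+      # callers no longer have to name a driver explicitly)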
+
if self.op.disk_template == constants.DT_FILE:
opcodes.RequireFileStorage()
elif self.op.disk_template == constants.DT_SHARED_FILE:
opcodes.RequireSharedFileStorage()
### Node/iallocator related checks
- _CheckIAllocatorOrNode(self, "iallocator", "pnode")
+ CheckIAllocatorOrNode(self, "iallocator", "pnode")
if self.op.pnode is not None:
if self.op.disk_template in constants.DTS_INT_MIRROR:
_CheckOpportunisticLocking(self.op)
- self._cds = _GetClusterDomainSecret()
+ self._cds = GetClusterDomainSecret()
if self.op.mode == constants.INSTANCE_IMPORT:
# On import force_variant must be True, because if we forced it at
if self.op.opportunistic_locking:
self.opportunistic_locks[locking.LEVEL_NODE] = True
- self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
else:
- self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+ self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
nodelist = [self.op.pnode]
if self.op.snode is not None:
- self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
+ self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
nodelist.append(self.op.snode)
self.needed_locks[locking.LEVEL_NODE] = nodelist
" requires a source node option",
errors.ECODE_INVAL)
else:
- self.op.src_node = src_node = _ExpandNodeName(self.cfg, src_node)
+ self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
self.needed_locks[locking.LEVEL_NODE].append(src_node)
if not os.path.isabs(src_path):
utils.PathJoin(pathutils.EXPORT_DIR, src_path)
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+
+ # Optimistically acquire shared group locks (we're reading the
+ # configuration). We can't just call GetInstanceNodeGroups, because the
+ # instance doesn't exist yet. Therefore we lock all node groups of all
+ # nodes we have.
+ if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
+      # If we lock all nodes for opportunistic allocation, we have no choice
+      # but to lock all groups, because they're allocated before nodes.
+ # This is sad, but true. At least we release all those we don't need in
+ # CheckPrereq later.
+ self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
+ else:
+ self.needed_locks[locking.LEVEL_NODEGROUP] = \
+ list(self.cfg.GetNodeGroupsFromNodes(
+ self.needed_locks[locking.LEVEL_NODE]))
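+      # (e.g. if the named pnode and snode live in two groups, only those
+      # two group locks are acquired here)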
+ self.share_locks[locking.LEVEL_NODEGROUP] = 1
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE_RES and \
+ self.opportunistic_locks[locking.LEVEL_NODE]:
+ # Even when using opportunistic locking, we require the same set of
+ # NODE_RES locks as we got NODE locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.owned_locks(locking.LEVEL_NODE)
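+      # (owned_locks() reflects what was actually acquired, which under
+      # opportunistic locking may be a subset of the locks requested)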
def _RunAllocator(self):
"""Run the allocator based on input opcode.
env["SRC_PATH"] = self.op.src_path
env["SRC_IMAGES"] = self.src_images
- env.update(_BuildInstanceHookEnv(
+ env.update(BuildInstanceHookEnv(
name=self.op.instance_name,
primary_node=self.op.pnode,
secondary_nodes=self.secondaries,
minmem=self.be_full[constants.BE_MINMEM],
maxmem=self.be_full[constants.BE_MAXMEM],
vcpus=self.be_full[constants.BE_VCPUS],
- nics=_NICListToTuple(self, self.nics),
+ nics=NICListToTuple(self, self.nics),
disk_template=self.op.disk_template,
- disks=[(d[constants.IDISK_NAME], d[constants.IDISK_SIZE],
- d[constants.IDISK_MODE]) for d in self.disks],
+ disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
+ d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
+ for d in self.disks],
bep=self.be_full,
hvp=self.hv_full,
hypervisor_name=self.op.hypervisor,
raise errors.OpPrereqError("No export found for relative path %s" %
src_path, errors.ECODE_INVAL)
- _CheckNodeOnline(self, src_node)
+ CheckNodeOnline(self, src_node)
result = self.rpc.call_export_info(src_node, src_path)
result.Raise("No export or invalid export found in dir %s" % src_path)
"""Check prerequisites.
"""
+ # Check that the optimistically acquired groups are correct wrt the
+ # acquired nodes
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+ cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
+ if not owned_groups.issuperset(cur_groups):
+ raise errors.OpPrereqError("New instance %s's node groups changed since"
+ " locks were acquired, current groups are"
+ " are '%s', owning groups '%s'; retry the"
+ " operation" %
+ (self.op.instance_name,
+ utils.CommaJoin(cur_groups),
+ utils.CommaJoin(owned_groups)),
+ errors.ECODE_STATE)
+
self._CalculateFileStorageDir()
if self.op.mode == constants.INSTANCE_IMPORT:
hv_type.CheckParameterSyntax(filled_hvp)
self.hv_full = filled_hvp
# check that we don't specify global parameters on an instance
- _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
- "instance", "cluster")
+ CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
+ "instance", "cluster")
# fill and remember the beparams dict
self.be_full = _ComputeFullBeParams(self.op, cluster)
# disk checks/pre-build
default_vg = self.cfg.GetVGName()
- self.disks = _ComputeDisks(self.op, default_vg)
+ self.disks = ComputeDisks(self.op, default_vg)
if self.op.mode == constants.INSTANCE_IMPORT:
disk_images = []
# Release all unneeded node locks
keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
- _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
- _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
- _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+ ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
+ ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
+ ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+ # Release all unneeded group locks
+ ReleaseLocks(self, locking.LEVEL_NODEGROUP,
+ keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
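+    # (only the groups containing the kept nodes stay locked; everything
+    # acquired optimistically in ExpandNames is dropped again)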
assert (self.owned_locks(locking.LEVEL_NODE) ==
self.owned_locks(locking.LEVEL_NODE_RES)), \
if self.op.snode == pnode.name:
raise errors.OpPrereqError("The secondary node cannot be the"
" primary node", errors.ECODE_INVAL)
- _CheckNodeOnline(self, self.op.snode)
- _CheckNodeNotDrained(self, self.op.snode)
- _CheckNodeVmCapable(self, self.op.snode)
+ CheckNodeOnline(self, self.op.snode)
+ CheckNodeNotDrained(self, self.op.snode)
+ CheckNodeVmCapable(self, self.op.snode)
self.secondaries.append(self.op.snode)
snode = self.cfg.GetNodeInfo(self.op.snode)
nodes = [pnode]
if self.op.disk_template in constants.DTS_INT_MIRROR:
nodes.append(snode)
- has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+ has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
if compat.any(map(has_es, nodes)):
raise errors.OpPrereqError("Disk template %s not supported with"
" exclusive storage" % self.op.disk_template,
-      # _CheckRADOSFreeSpace() is just a placeholder.
+      # CheckRADOSFreeSpace() is just a placeholder.
# Any function that checks prerequisites can be placed here.
# Check if there is enough space on the RADOS cluster.
- _CheckRADOSFreeSpace()
+ CheckRADOSFreeSpace()
elif self.op.disk_template == constants.DT_EXT:
# FIXME: Function that checks prereqs if needed
pass
else:
# Check lv size requirements, if not adopting
- req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
- _CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
+ req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
+ CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)
elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
(pnode.group, group_info.name, utils.CommaJoin(res)))
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
- _CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
+ CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)
-    _CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
+    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
# check OS parameters (remotely)
- _CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
+    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)
- _CheckNicsBridgesExist(self, self.nics, self.pnode.name)
+    CheckNicsBridgesExist(self, self.nics, self.pnode.name)
-    #TODO: _CheckExtParams (remotely)
+    # TODO: CheckExtParams (remotely)
# Check parameters for extstorage
# memory check on primary node
#TODO(dynmem): use MINMEM for checking
if self.op.start:
- _CheckNodeFreeMemory(self, self.pnode.name,
- "creating instance %s" % self.op.instance_name,
- self.be_full[constants.BE_MAXMEM],
- self.op.hypervisor)
+ CheckNodeFreeMemory(self, self.pnode.name,
+ "creating instance %s" % self.op.instance_name,
+ self.be_full[constants.BE_MAXMEM],
+ self.op.hypervisor)
self.dry_run_result = list(nodenames)
# has no disks yet (we are generating them right here).
node = self.cfg.GetNodeInfo(pnode_name)
nodegroup = self.cfg.GetNodeGroup(node.group)
- disks = _GenerateDiskTemplate(self,
- self.op.disk_template,
- instance, pnode_name,
- self.secondaries,
- self.disks,
- self.instance_file_storage_dir,
- self.op.file_driver,
- 0,
- feedback_fn,
- self.cfg.GetGroupDiskParams(nodegroup))
+ disks = GenerateDiskTemplate(self,
+ self.op.disk_template,
+ instance, pnode_name,
+ self.secondaries,
+ self.disks,
+ self.instance_file_storage_dir,
+ self.op.file_driver,
+ 0,
+ feedback_fn,
+ self.cfg.GetGroupDiskParams(nodegroup))
iobj = objects.Instance(name=instance, os=self.op.os_type,
primary_node=pnode_name,
nics=self.nics, disks=disks,
disk_template=self.op.disk_template,
+ disks_active=False,
admin_state=constants.ADMINST_DOWN,
network_port=network_port,
beparams=self.op.beparams,
else:
feedback_fn("* creating instance disks...")
try:
- _CreateDisks(self, iobj)
+ CreateDisks(self, iobj)
except errors.OpExecError:
self.LogWarning("Device creation failed")
self.cfg.ReleaseDRBDMinors(instance)
if self.op.mode == constants.INSTANCE_IMPORT:
# Release unused nodes
- _ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
+ ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
else:
# Release all nodes
- _ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE)
disk_abort = False
if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
feedback_fn("* wiping instance disks...")
try:
- _WipeDisks(self, iobj)
+ WipeDisks(self, iobj)
except errors.OpExecError, err:
logging.exception("Wiping disks failed")
self.LogWarning("Wiping instance disks failed (%s)", err)
# Something is already wrong with the disks, don't do anything else
pass
elif self.op.wait_for_sync:
- disk_abort = not _WaitForSync(self, iobj)
+ disk_abort = not WaitForSync(self, iobj)
elif iobj.disk_template in constants.DTS_INT_MIRROR:
# make sure the disks are not degraded (still sync-ing is ok)
feedback_fn("* checking mirrors status")
- disk_abort = not _WaitForSync(self, iobj, oneshot=True)
+ disk_abort = not WaitForSync(self, iobj, oneshot=True)
else:
disk_abort = False
if disk_abort:
- _RemoveDisks(self, iobj)
+ RemoveDisks(self, iobj)
self.cfg.RemoveInstance(iobj.name)
# Make sure the instance lock gets removed
self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
raise errors.OpExecError("There are some degraded disks for"
" this instance")
+ # instance disks are now active
+ iobj.disks_active = True
+
# Release all node resource locks
- _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+ ReleaseLocks(self, locking.LEVEL_NODE_RES)
if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
# we need to set the disks ID to the primary node, since the
This runs on master, primary and secondary nodes of the instance.
"""
- env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env = BuildInstanceHookEnvByObject(self, self.instance)
env["INSTANCE_NEW_NAME"] = self.op.new_name
return env
This checks that the instance is in the cluster and is not running.
"""
- self.op.instance_name = _ExpandInstanceName(self.cfg,
- self.op.instance_name)
+ self.op.instance_name = ExpandInstanceName(self.cfg,
+ self.op.instance_name)
instance = self.cfg.GetInstanceInfo(self.op.instance_name)
assert instance is not None
- _CheckNodeOnline(self, instance.primary_node)
- _CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
- msg="cannot rename")
+ CheckNodeOnline(self, instance.primary_node)
+ CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
+ msg="cannot rename")
self.instance = instance
new_name = self.op.new_name
(inst.primary_node, old_file_storage_dir,
new_file_storage_dir))
- _StartInstanceDisks(self, inst, None)
+ StartInstanceDisks(self, inst, None)
# update info on disks
- info = _GetInstanceInfoText(inst)
+ info = GetInstanceInfoText(inst)
for (idx, disk) in enumerate(inst.disks):
for node in inst.all_nodes:
self.cfg.SetDiskID(disk, node)
(inst.name, inst.primary_node, msg))
self.LogWarning(msg)
finally:
- _ShutdownInstanceDisks(self, inst)
+ ShutdownInstanceDisks(self, inst)
return inst.name
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
This runs on master, primary and secondary nodes of the instance.
"""
- env = _BuildInstanceHookEnvByObject(self, self.instance)
+ env = BuildInstanceHookEnvByObject(self, self.instance)
env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
return env
self.owned_locks(locking.LEVEL_NODE)), \
"Not owning correct locks"
- _RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
+ RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)
class LUInstanceMove(LogicalUnit):
def ExpandNames(self):
self._ExpandAndLockInstance()
- target_node = _ExpandNodeName(self.cfg, self.op.target_node)
+ target_node = ExpandNodeName(self.cfg, self.op.target_node)
self.op.target_node = target_node
self.needed_locks[locking.LEVEL_NODE] = [target_node]
self.needed_locks[locking.LEVEL_NODE_RES] = []
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
"TARGET_NODE": self.op.target_node,
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
def BuildHooksNodes(self):
raise errors.OpPrereqError("Instance disk %d has a complex layout,"
" cannot copy" % idx, errors.ECODE_STATE)
- _CheckNodeOnline(self, target_node)
- _CheckNodeNotDrained(self, target_node)
- _CheckNodeVmCapable(self, target_node)
+ CheckNodeOnline(self, target_node)
+ CheckNodeNotDrained(self, target_node)
+ CheckNodeVmCapable(self, target_node)
cluster = self.cfg.GetClusterInfo()
group_info = self.cfg.GetNodeGroup(node.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
- _CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
- ignore=self.op.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
+ ignore=self.op.ignore_ipolicy)
if instance.admin_state == constants.ADMINST_UP:
# check memory requirements on the secondary node
- _CheckNodeFreeMemory(self, target_node,
- "failing over instance %s" %
- instance.name, bep[constants.BE_MAXMEM],
- instance.hypervisor)
+ CheckNodeFreeMemory(self, target_node,
+ "failing over instance %s" %
+ instance.name, bep[constants.BE_MAXMEM],
+ instance.hypervisor)
else:
self.LogInfo("Not checking memory on the secondary node as"
" instance will not be started")
-    # check bridge existance
+    # check bridge existence
- _CheckInstanceBridgesExist(self, instance, node=target_node)
+ CheckInstanceBridgesExist(self, instance, node=target_node)
def Exec(self, feedback_fn):
"""Move an instance.
# create the target disks
try:
- _CreateDisks(self, instance, target_node=target_node)
+ CreateDisks(self, instance, target_node=target_node)
except errors.OpExecError:
self.LogWarning("Device creation failed")
self.cfg.ReleaseDRBDMinors(instance.name)
idx, result.fail_msg)
errs.append(result.fail_msg)
break
- dev_path = result.payload
+ dev_path, _ = result.payload
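+      # result.payload is now a pair; only the device path matters here (the
+      # second element is, presumably, the symlink name)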
result = self.rpc.call_blockdev_export(source_node, (disk, instance),
target_node, dev_path,
cluster_name)
if errs:
self.LogWarning("Some disks failed to copy, aborting")
try:
- _RemoveDisks(self, instance, target_node=target_node)
+ RemoveDisks(self, instance, target_node=target_node)
finally:
self.cfg.ReleaseDRBDMinors(instance.name)
raise errors.OpExecError("Errors during disk copy: %s" %
self.cfg.Update(instance, feedback_fn)
self.LogInfo("Removing the disks on the original node")
- _RemoveDisks(self, instance, target_node=source_node)
+ RemoveDisks(self, instance, target_node=source_node)
# Only start the instance if it's marked as up
if instance.admin_state == constants.ADMINST_UP:
self.LogInfo("Starting instance %s on node %s",
instance.name, target_node)
- disks_ok, _ = _AssembleInstanceDisks(self, instance,
- ignore_secondaries=True)
+ disks_ok, _ = AssembleInstanceDisks(self, instance,
+ ignore_secondaries=True)
if not disks_ok:
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Can't activate the instance's disks")
result = self.rpc.call_instance_start(target_node,
self.op.reason)
msg = result.fail_msg
if msg:
- _ShutdownInstanceDisks(self, instance)
+ ShutdownInstanceDisks(self, instance)
raise errors.OpExecError("Could not start instance %s on node %s: %s" %
(instance.name, target_node, msg))
-def _GetInstanceConsole(cluster, instance):
- """Returns console information for an instance.
-
- @type cluster: L{objects.Cluster}
- @type instance: L{objects.Instance}
- @rtype: dict
-
- """
- hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
- # beparams and hvparams are passed separately, to avoid editing the
- # instance and then saving the defaults in the instance itself.
- hvparams = cluster.FillHV(instance)
- beparams = cluster.FillBE(instance)
- console = hyper.GetInstanceConsole(instance, hvparams, beparams)
-
- assert console.instance == instance.name
- assert console.Validate()
-
- return console.ToDict()
-
-
-class _InstanceQuery(_QueryBase):
- FIELDS = query.INSTANCE_FIELDS
-
- def ExpandNames(self, lu):
- lu.needed_locks = {}
- lu.share_locks = _ShareAll()
-
- if self.names:
- self.wanted = _GetWantedInstances(lu, self.names)
- else:
- self.wanted = locking.ALL_SET
-
- self.do_locking = (self.use_locking and
- query.IQ_LIVE in self.requested_data)
- if self.do_locking:
- lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
- lu.needed_locks[locking.LEVEL_NODEGROUP] = []
- lu.needed_locks[locking.LEVEL_NODE] = []
- lu.needed_locks[locking.LEVEL_NETWORK] = []
- lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
- self.do_grouplocks = (self.do_locking and
- query.IQ_NODES in self.requested_data)
-
- def DeclareLocks(self, lu, level):
- if self.do_locking:
- if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
- assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
-
- # Lock all groups used by instances optimistically; this requires going
- # via the node before it's locked, requiring verification later on
- lu.needed_locks[locking.LEVEL_NODEGROUP] = \
- set(group_uuid
- for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
- for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
- elif level == locking.LEVEL_NODE:
- lu._LockInstancesNodes() # pylint: disable=W0212
-
- elif level == locking.LEVEL_NETWORK:
- lu.needed_locks[locking.LEVEL_NETWORK] = \
- frozenset(net_uuid
- for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
- for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))
-
- @staticmethod
- def _CheckGroupLocks(lu):
- owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))
-
- # Check if node groups for locked instances are still correct
- for instance_name in owned_instances:
- _CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)
-
- def _GetQueryData(self, lu):
- """Computes the list of instances and their attributes.
-
- """
- if self.do_grouplocks:
- self._CheckGroupLocks(lu)
-
- cluster = lu.cfg.GetClusterInfo()
- all_info = lu.cfg.GetAllInstancesInfo()
-
- instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
-
- instance_list = [all_info[name] for name in instance_names]
- nodes = frozenset(itertools.chain(*(inst.all_nodes
- for inst in instance_list)))
- hv_list = list(set([inst.hypervisor for inst in instance_list]))
- bad_nodes = []
- offline_nodes = []
- wrongnode_inst = set()
-
- # Gather data as requested
- if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
- live_data = {}
- node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
- for name in nodes:
- result = node_data[name]
- if result.offline:
- # offline nodes will be in both lists
- assert result.fail_msg
- offline_nodes.append(name)
- if result.fail_msg:
- bad_nodes.append(name)
- elif result.payload:
- for inst in result.payload:
- if inst in all_info:
- if all_info[inst].primary_node == name:
- live_data.update(result.payload)
- else:
- wrongnode_inst.add(inst)
- else:
- # orphan instance; we don't list it here as we don't
- # handle this case yet in the output of instance listing
- logging.warning("Orphan instance '%s' found on node %s",
- inst, name)
- # else no instance is alive
- else:
- live_data = {}
-
- if query.IQ_DISKUSAGE in self.requested_data:
- gmi = ganeti.masterd.instance
- disk_usage = dict((inst.name,
- gmi.ComputeDiskSize(inst.disk_template,
- [{constants.IDISK_SIZE: disk.size}
- for disk in inst.disks]))
- for inst in instance_list)
- else:
- disk_usage = None
-
- if query.IQ_CONSOLE in self.requested_data:
- consinfo = {}
- for inst in instance_list:
- if inst.name in live_data:
- # Instance is running
- consinfo[inst.name] = _GetInstanceConsole(cluster, inst)
- else:
- consinfo[inst.name] = None
- assert set(consinfo.keys()) == set(instance_names)
- else:
- consinfo = None
-
- if query.IQ_NODES in self.requested_data:
- node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
- instance_list)))
- nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
- groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
- for uuid in set(map(operator.attrgetter("group"),
- nodes.values())))
- else:
- nodes = None
- groups = None
-
- if query.IQ_NETWORKS in self.requested_data:
- net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
- for i in instance_list))
- networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
- else:
- networks = None
-
- return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
- disk_usage, offline_nodes, bad_nodes,
- live_data, wrongnode_inst, consinfo,
- nodes, groups, networks)
-
-
-class LUInstanceQuery(NoHooksLU):
- """Logical unit for querying instances.
-
- """
- # pylint: disable=W0142
- REQ_BGL = False
-
- def CheckArguments(self):
- self.iq = _InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
- self.op.output_fields, self.op.use_locking)
-
- def ExpandNames(self):
- self.iq.ExpandNames(self)
-
- def DeclareLocks(self, level):
- self.iq.DeclareLocks(self, level)
-
- def Exec(self, feedback_fn):
- return self.iq.OldStyleQuery(self)
-
-
-class LUInstanceQueryData(NoHooksLU):
- """Query runtime instance data.
-
- """
- REQ_BGL = False
-
- def ExpandNames(self):
- self.needed_locks = {}
-
- # Use locking if requested or when non-static information is wanted
- if not (self.op.static or self.op.use_locking):
- self.LogWarning("Non-static data requested, locks need to be acquired")
- self.op.use_locking = True
-
- if self.op.instances or not self.op.use_locking:
- # Expand instance names right here
- self.wanted_names = _GetWantedInstances(self, self.op.instances)
- else:
- # Will use acquired locks
- self.wanted_names = None
-
- if self.op.use_locking:
- self.share_locks = _ShareAll()
-
- if self.wanted_names is None:
- self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
- else:
- self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names
-
- self.needed_locks[locking.LEVEL_NODEGROUP] = []
- self.needed_locks[locking.LEVEL_NODE] = []
- self.needed_locks[locking.LEVEL_NETWORK] = []
- self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
-
- def DeclareLocks(self, level):
- if self.op.use_locking:
- owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
- if level == locking.LEVEL_NODEGROUP:
-
- # Lock all groups used by instances optimistically; this requires going
- # via the node before it's locked, requiring verification later on
- self.needed_locks[locking.LEVEL_NODEGROUP] = \
- frozenset(group_uuid
- for instance_name in owned_instances
- for group_uuid in
- self.cfg.GetInstanceNodeGroups(instance_name))
-
- elif level == locking.LEVEL_NODE:
- self._LockInstancesNodes()
-
- elif level == locking.LEVEL_NETWORK:
- self.needed_locks[locking.LEVEL_NETWORK] = \
- frozenset(net_uuid
- for instance_name in owned_instances
- for net_uuid in
- self.cfg.GetInstanceNetworks(instance_name))
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This only checks the optional instance list against the existing names.
-
- """
- owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
- owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
- owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
- owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
-
- if self.wanted_names is None:
- assert self.op.use_locking, "Locking was not used"
- self.wanted_names = owned_instances
-
- instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))
-
- if self.op.use_locking:
- _CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
- None)
- else:
- assert not (owned_instances or owned_groups or
- owned_nodes or owned_networks)
-
- self.wanted_instances = instances.values()
-
- def _ComputeBlockdevStatus(self, node, instance, dev):
- """Returns the status of a block device
-
- """
- if self.op.static or not node:
- return None
-
- self.cfg.SetDiskID(dev, node)
-
- result = self.rpc.call_blockdev_find(node, dev)
- if result.offline:
- return None
-
- result.Raise("Can't compute disk status for %s" % instance.name)
-
- status = result.payload
- if status is None:
- return None
-
- return (status.dev_path, status.major, status.minor,
- status.sync_percent, status.estimated_time,
- status.is_degraded, status.ldisk_status)
-
- def _ComputeDiskStatus(self, instance, snode, dev):
- """Compute block device status.
-
- """
- (anno_dev,) = _AnnotateDiskParams(instance, [dev], self.cfg)
-
- return self._ComputeDiskStatusInner(instance, snode, anno_dev)
-
- def _ComputeDiskStatusInner(self, instance, snode, dev):
- """Compute block device status.
-
- @attention: The device has to be annotated already.
-
- """
- if dev.dev_type in constants.LDS_DRBD:
- # we change the snode then (otherwise we use the one passed in)
- if dev.logical_id[0] == instance.primary_node:
- snode = dev.logical_id[1]
- else:
- snode = dev.logical_id[0]
-
- dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
- instance, dev)
- dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)
-
- if dev.children:
- dev_children = map(compat.partial(self._ComputeDiskStatusInner,
- instance, snode),
- dev.children)
- else:
- dev_children = []
-
- return {
- "iv_name": dev.iv_name,
- "dev_type": dev.dev_type,
- "logical_id": dev.logical_id,
- "physical_id": dev.physical_id,
- "pstatus": dev_pstatus,
- "sstatus": dev_sstatus,
- "children": dev_children,
- "mode": dev.mode,
- "size": dev.size,
- "name": dev.name,
- "uuid": dev.uuid,
- }
-
- def Exec(self, feedback_fn):
- """Gather and return data"""
- result = {}
-
- cluster = self.cfg.GetClusterInfo()
-
- node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
- nodes = dict(self.cfg.GetMultiNodeInfo(node_names))
-
- groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
- for node in nodes.values()))
-
- group2name_fn = lambda uuid: groups[uuid].name
- for instance in self.wanted_instances:
- pnode = nodes[instance.primary_node]
-
- if self.op.static or pnode.offline:
- remote_state = None
- if pnode.offline:
- self.LogWarning("Primary node %s is marked offline, returning static"
- " information only for instance %s" %
- (pnode.name, instance.name))
- else:
- remote_info = self.rpc.call_instance_info(instance.primary_node,
- instance.name,
- instance.hypervisor)
- remote_info.Raise("Error checking node %s" % instance.primary_node)
- remote_info = remote_info.payload
- if remote_info and "state" in remote_info:
- remote_state = "up"
- else:
- if instance.admin_state == constants.ADMINST_UP:
- remote_state = "down"
- else:
- remote_state = instance.admin_state
-
- disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
- instance.disks)
-
- snodes_group_uuids = [nodes[snode_name].group
- for snode_name in instance.secondary_nodes]
-
- result[instance.name] = {
- "name": instance.name,
- "config_state": instance.admin_state,
- "run_state": remote_state,
- "pnode": instance.primary_node,
- "pnode_group_uuid": pnode.group,
- "pnode_group_name": group2name_fn(pnode.group),
- "snodes": instance.secondary_nodes,
- "snodes_group_uuids": snodes_group_uuids,
- "snodes_group_names": map(group2name_fn, snodes_group_uuids),
- "os": instance.os,
- # this happens to be the same format used for hooks
- "nics": _NICListToTuple(self, instance.nics),
- "disk_template": instance.disk_template,
- "disks": disks,
- "hypervisor": instance.hypervisor,
- "network_port": instance.network_port,
- "hv_instance": instance.hvparams,
- "hv_actual": cluster.FillHV(instance, skip_globals=True),
- "be_instance": instance.beparams,
- "be_actual": cluster.FillBE(instance),
- "os_instance": instance.osparams,
- "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
- "serial_no": instance.serial_no,
- "mtime": instance.mtime,
- "ctime": instance.ctime,
- "uuid": instance.uuid,
- }
-
- return result
-
-
-class LUInstanceStartup(LogicalUnit):
- """Starts an instance.
-
- """
- HPATH = "instance-start"
- HTYPE = constants.HTYPE_INSTANCE
- REQ_BGL = False
-
- def CheckArguments(self):
- # extra beparams
- if self.op.beparams:
- # fill the beparams dict
- objects.UpgradeBeParams(self.op.beparams)
- utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
-
- def ExpandNames(self):
- self._ExpandAndLockInstance()
- self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
-
- def DeclareLocks(self, level):
- if level == locking.LEVEL_NODE_RES:
- self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)
-
- def BuildHooksEnv(self):
- """Build hooks env.
-
- This runs on master, primary and secondary nodes of the instance.
-
- """
- env = {
- "FORCE": self.op.force,
- }
-
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
-
- return env
-
- def BuildHooksNodes(self):
- """Build hooks nodes.
-
- """
- nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
- return (nl, nl)
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the instance is in the cluster.
-
- """
- self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
- assert self.instance is not None, \
- "Cannot retrieve locked instance %s" % self.op.instance_name
-
- # extra hvparams
- if self.op.hvparams:
- # check hypervisor parameter syntax (locally)
- cluster = self.cfg.GetClusterInfo()
- utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
- filled_hvp = cluster.FillHV(instance)
- filled_hvp.update(self.op.hvparams)
- hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
- hv_type.CheckParameterSyntax(filled_hvp)
- _CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
-
- _CheckInstanceState(self, instance, INSTANCE_ONLINE)
-
- self.primary_offline = self.cfg.GetNodeInfo(instance.primary_node).offline
-
- if self.primary_offline and self.op.ignore_offline_nodes:
- self.LogWarning("Ignoring offline primary node")
-
- if self.op.hvparams or self.op.beparams:
- self.LogWarning("Overridden parameters are ignored")
- else:
- _CheckNodeOnline(self, instance.primary_node)
-
- bep = self.cfg.GetClusterInfo().FillBE(instance)
- bep.update(self.op.beparams)
-
- # check bridges existence
- _CheckInstanceBridgesExist(self, instance)
-
- remote_info = self.rpc.call_instance_info(instance.primary_node,
- instance.name,
- instance.hypervisor)
- remote_info.Raise("Error checking node %s" % instance.primary_node,
- prereq=True, ecode=errors.ECODE_ENVIRON)
- if not remote_info.payload: # not running already
- _CheckNodeFreeMemory(self, instance.primary_node,
- "starting instance %s" % instance.name,
- bep[constants.BE_MINMEM], instance.hypervisor)
-
- def Exec(self, feedback_fn):
- """Start the instance.
-
- """
- instance = self.instance
- force = self.op.force
- reason = self.op.reason
-
- if not self.op.no_remember:
- self.cfg.MarkInstanceUp(instance.name)
-
- if self.primary_offline:
- assert self.op.ignore_offline_nodes
- self.LogInfo("Primary node offline, marked instance as started")
- else:
- node_current = instance.primary_node
-
- _StartInstanceDisks(self, instance, force)
-
- result = \
- self.rpc.call_instance_start(node_current,
- (instance, self.op.hvparams,
- self.op.beparams),
- self.op.startup_paused, reason)
- msg = result.fail_msg
- if msg:
- _ShutdownInstanceDisks(self, instance)
- raise errors.OpExecError("Could not start instance: %s" % msg)
-
-
-class LUInstanceShutdown(LogicalUnit):
- """Shutdown an instance.
-
- """
- HPATH = "instance-stop"
- HTYPE = constants.HTYPE_INSTANCE
- REQ_BGL = False
-
- def ExpandNames(self):
- self._ExpandAndLockInstance()
-
- def BuildHooksEnv(self):
- """Build hooks env.
-
- This runs on master, primary and secondary nodes of the instance.
-
- """
- env = _BuildInstanceHookEnvByObject(self, self.instance)
- env["TIMEOUT"] = self.op.timeout
- return env
-
- def BuildHooksNodes(self):
- """Build hooks nodes.
-
- """
- nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
- return (nl, nl)
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the instance is in the cluster.
-
- """
- self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
- assert self.instance is not None, \
- "Cannot retrieve locked instance %s" % self.op.instance_name
-
- if not self.op.force:
- _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
- else:
- self.LogWarning("Ignoring offline instance check")
-
- self.primary_offline = \
- self.cfg.GetNodeInfo(self.instance.primary_node).offline
-
- if self.primary_offline and self.op.ignore_offline_nodes:
- self.LogWarning("Ignoring offline primary node")
- else:
- _CheckNodeOnline(self, self.instance.primary_node)
-
- def Exec(self, feedback_fn):
- """Shutdown the instance.
-
- """
- instance = self.instance
- node_current = instance.primary_node
- timeout = self.op.timeout
- reason = self.op.reason
-
- # If the instance is offline we shouldn't mark it as down, as that
- # resets the offline flag.
- if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
- self.cfg.MarkInstanceDown(instance.name)
-
- if self.primary_offline:
- assert self.op.ignore_offline_nodes
- self.LogInfo("Primary node offline, marked instance as stopped")
- else:
- result = self.rpc.call_instance_shutdown(node_current, instance, timeout,
- reason)
- msg = result.fail_msg
- if msg:
- self.LogWarning("Could not shutdown instance: %s", msg)
-
- _ShutdownInstanceDisks(self, instance)
-
-
-class LUInstanceReinstall(LogicalUnit):
- """Reinstall an instance.
-
- """
- HPATH = "instance-reinstall"
- HTYPE = constants.HTYPE_INSTANCE
- REQ_BGL = False
-
- def ExpandNames(self):
- self._ExpandAndLockInstance()
-
- def BuildHooksEnv(self):
- """Build hooks env.
-
- This runs on master, primary and secondary nodes of the instance.
-
- """
- return _BuildInstanceHookEnvByObject(self, self.instance)
-
- def BuildHooksNodes(self):
- """Build hooks nodes.
-
- """
- nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
- return (nl, nl)
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the instance is in the cluster and is not running.
-
- """
- instance = self.cfg.GetInstanceInfo(self.op.instance_name)
- assert instance is not None, \
- "Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, instance.primary_node, "Instance primary node"
- " offline, cannot reinstall")
-
- if instance.disk_template == constants.DT_DISKLESS:
- raise errors.OpPrereqError("Instance '%s' has no disks" %
- self.op.instance_name,
- errors.ECODE_INVAL)
- _CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")
-
- if self.op.os_type is not None:
- # OS verification
- pnode = _ExpandNodeName(self.cfg, instance.primary_node)
- _CheckNodeHasOS(self, pnode, self.op.os_type, self.op.force_variant)
- instance_os = self.op.os_type
- else:
- instance_os = instance.os
-
- nodelist = list(instance.all_nodes)
-
- if self.op.osparams:
- i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
- _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
- self.os_inst = i_osdict # the new dict (without defaults)
- else:
- self.os_inst = None
-
- self.instance = instance
-
- def Exec(self, feedback_fn):
- """Reinstall the instance.
-
- """
- inst = self.instance
-
- if self.op.os_type is not None:
- feedback_fn("Changing OS to '%s'..." % self.op.os_type)
- inst.os = self.op.os_type
- # Write to configuration
- self.cfg.Update(inst, feedback_fn)
-
- _StartInstanceDisks(self, inst, None)
- try:
- feedback_fn("Running the instance OS create scripts...")
- # FIXME: pass debug option from opcode to backend
- result = self.rpc.call_instance_os_add(inst.primary_node,
- (inst, self.os_inst), True,
- self.op.debug_level)
- result.Raise("Could not install OS for instance %s on node %s" %
- (inst.name, inst.primary_node))
- finally:
- _ShutdownInstanceDisks(self, inst)
-
-
-class LUInstanceReboot(LogicalUnit):
- """Reboot an instance.
-
- """
- HPATH = "instance-reboot"
- HTYPE = constants.HTYPE_INSTANCE
- REQ_BGL = False
-
- def ExpandNames(self):
- self._ExpandAndLockInstance()
-
- def BuildHooksEnv(self):
- """Build hooks env.
-
- This runs on master, primary and secondary nodes of the instance.
-
- """
- env = {
- "IGNORE_SECONDARIES": self.op.ignore_secondaries,
- "REBOOT_TYPE": self.op.reboot_type,
- "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
- }
-
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
-
- return env
-
- def BuildHooksNodes(self):
- """Build hooks nodes.
-
- """
- nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
- return (nl, nl)
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the instance is in the cluster.
-
- """
- self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
- assert self.instance is not None, \
- "Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckInstanceState(self, instance, INSTANCE_ONLINE)
- _CheckNodeOnline(self, instance.primary_node)
-
- # check bridges existence
- _CheckInstanceBridgesExist(self, instance)
-
- def Exec(self, feedback_fn):
- """Reboot the instance.
-
- """
- instance = self.instance
- ignore_secondaries = self.op.ignore_secondaries
- reboot_type = self.op.reboot_type
- reason = self.op.reason
-
- remote_info = self.rpc.call_instance_info(instance.primary_node,
- instance.name,
- instance.hypervisor)
- remote_info.Raise("Error checking node %s" % instance.primary_node)
- instance_running = bool(remote_info.payload)
-
- node_current = instance.primary_node
-
- if instance_running and reboot_type in [constants.INSTANCE_REBOOT_SOFT,
- constants.INSTANCE_REBOOT_HARD]:
- for disk in instance.disks:
- self.cfg.SetDiskID(disk, node_current)
- result = self.rpc.call_instance_reboot(node_current, instance,
- reboot_type,
- self.op.shutdown_timeout, reason)
- result.Raise("Could not reboot instance")
- else:
- if instance_running:
- result = self.rpc.call_instance_shutdown(node_current, instance,
- self.op.shutdown_timeout,
- reason)
- result.Raise("Could not shutdown instance for full reboot")
- _ShutdownInstanceDisks(self, instance)
- else:
- self.LogInfo("Instance %s was already stopped, starting now",
- instance.name)
- _StartInstanceDisks(self, instance, ignore_secondaries)
- result = self.rpc.call_instance_start(node_current,
- (instance, None, None), False,
- reason)
- msg = result.fail_msg
- if msg:
- _ShutdownInstanceDisks(self, instance)
- raise errors.OpExecError("Could not start instance for"
- " full reboot: %s" % msg)
-
- self.cfg.MarkInstanceUp(instance.name)
-
-
-class LUInstanceConsole(NoHooksLU):
- """Connect to an instance's console.
-
- This is somewhat special in that it returns the command line that
- you need to run on the master node in order to connect to the
- console.
-
- """
- REQ_BGL = False
-
- def ExpandNames(self):
- self.share_locks = _ShareAll()
- self._ExpandAndLockInstance()
-
- def CheckPrereq(self):
- """Check prerequisites.
-
- This checks that the instance is in the cluster.
-
- """
- self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
- assert self.instance is not None, \
- "Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckNodeOnline(self, self.instance.primary_node)
-
- def Exec(self, feedback_fn):
- """Connect to the console of an instance
-
- """
- instance = self.instance
- node = instance.primary_node
-
- node_insts = self.rpc.call_instance_list([node],
- [instance.hypervisor])[node]
- node_insts.Raise("Can't get node information from %s" % node)
-
- if instance.name not in node_insts.payload:
- if instance.admin_state == constants.ADMINST_UP:
- state = constants.INSTST_ERRORDOWN
- elif instance.admin_state == constants.ADMINST_DOWN:
- state = constants.INSTST_ADMINDOWN
- else:
- state = constants.INSTST_ADMINOFFLINE
- raise errors.OpExecError("Instance %s is not running (state %s)" %
- (instance.name, state))
-
- logging.debug("Connecting to console of %s on %s", instance.name, node)
-
- return _GetInstanceConsole(self.cfg.GetClusterInfo(), instance)
-
-
class LUInstanceMultiAlloc(NoHooksLU):
"""Allocates multiple instances at the same time.
" pnode/snode while others do not",
errors.ECODE_INVAL)
- if self.op.iallocator is None:
+ if not has_nodes and self.op.iallocator is None:
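+      # No nodes were named explicitly, so an iallocator is mandatory; fall
+      # back to the cluster default if one is configured.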
default_iallocator = self.cfg.GetDefaultIAllocator()
- if default_iallocator and has_nodes:
+ if default_iallocator:
self.op.iallocator = default_iallocator
else:
raise errors.OpPrereqError("No iallocator or nodes on the instances"
"""Calculate the locks.
"""
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
# iallocator will select nodes and even if no iallocator is used,
# collisions with LUInstanceCreate should be avoided
if self.op.opportunistic_locking:
self.opportunistic_locks[locking.LEVEL_NODE] = True
- self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
else:
nodeslist = []
for inst in self.op.instances:
- inst.pnode = _ExpandNodeName(self.cfg, inst.pnode)
+ inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
nodeslist.append(inst.pnode)
if inst.snode is not None:
- inst.snode = _ExpandNodeName(self.cfg, inst.snode)
+ inst.snode = ExpandNodeName(self.cfg, inst.snode)
nodeslist.append(inst.snode)
self.needed_locks[locking.LEVEL_NODE] = nodeslist
-    # prevent accidential modification)
+    # prevent accidental modification)
self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE_RES and \
+ self.opportunistic_locks[locking.LEVEL_NODE]:
+ # Even when using opportunistic locking, we require the same set of
+ # NODE_RES locks as we got NODE locks
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ self.owned_locks(locking.LEVEL_NODE)
+
def CheckPrereq(self):
"""Check prerequisite.
"""
- cluster = self.cfg.GetClusterInfo()
- default_vg = self.cfg.GetVGName()
- ec_id = self.proc.GetECId()
+ if self.op.iallocator:
+ cluster = self.cfg.GetClusterInfo()
+ default_vg = self.cfg.GetVGName()
+ ec_id = self.proc.GetECId()
- if self.op.opportunistic_locking:
- # Only consider nodes for which a lock is held
- node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
- else:
- node_whitelist = None
+ if self.op.opportunistic_locking:
+ # Only consider nodes for which a lock is held
+ node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
+ else:
+ node_whitelist = None
- insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
- _ComputeNics(op, cluster, None,
- self.cfg, ec_id),
- _ComputeFullBeParams(op, cluster),
- node_whitelist)
- for op in self.op.instances]
+ insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
+ _ComputeNics(op, cluster, None,
+ self.cfg, ec_id),
+ _ComputeFullBeParams(op, cluster),
+ node_whitelist)
+ for op in self.op.instances]
- req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
- ial = iallocator.IAllocator(self.cfg, self.rpc, req)
+ req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
+ ial = iallocator.IAllocator(self.cfg, self.rpc, req)
- ial.Run(self.op.iallocator)
+ ial.Run(self.op.iallocator)
- if not ial.success:
- raise errors.OpPrereqError("Can't compute nodes using"
- " iallocator '%s': %s" %
- (self.op.iallocator, ial.info),
- errors.ECODE_NORES)
+ if not ial.success:
+ raise errors.OpPrereqError("Can't compute nodes using"
+ " iallocator '%s': %s" %
+ (self.op.iallocator, ial.info),
+ errors.ECODE_NORES)
- self.ia_result = ial.result
+ self.ia_result = ial.result
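+      # (ial.result is an (allocatable, failed) pair, unpacked again in
+      # _ConstructPartialResult and Exec below)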
if self.op.dry_run:
self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
"""Contructs the partial result.
"""
- (allocatable, failed) = self.ia_result
+ if self.op.iallocator:
+ (allocatable, failed_insts) = self.ia_result
+ allocatable_insts = map(compat.fst, allocatable)
+ else:
+ allocatable_insts = [op.instance_name for op in self.op.instances]
+ failed_insts = []
+
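+    # Illustratively the result resembles {"allocatable": [names],
+    # "failed": [names]}, modulo the exact key constants.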
return {
- opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
- map(compat.fst, allocatable),
- opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
+ opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
+ opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
}
def Exec(self, feedback_fn):
"""Executes the opcode.
"""
- op2inst = dict((op.instance_name, op) for op in self.op.instances)
- (allocatable, failed) = self.ia_result
-
jobs = []
- for (name, nodes) in allocatable:
- op = op2inst.pop(name)
+ if self.op.iallocator:
+ op2inst = dict((op.instance_name, op) for op in self.op.instances)
+ (allocatable, failed) = self.ia_result
- if len(nodes) > 1:
- (op.pnode, op.snode) = nodes
- else:
- (op.pnode,) = nodes
+ for (name, nodes) in allocatable:
+ op = op2inst.pop(name)
- jobs.append([op])
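+        # The iallocator hands back a single node name for plain templates
+        # and a (pnode, snode) pair for mirrored ones such as DRBD.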
+ if len(nodes) > 1:
+ (op.pnode, op.snode) = nodes
+ else:
+ (op.pnode,) = nodes
+
+ jobs.append([op])
- missing = set(op2inst.keys()) - set(failed)
- assert not missing, \
- "Iallocator did return incomplete result: %s" % utils.CommaJoin(missing)
+ missing = set(op2inst.keys()) - set(failed)
+ assert not missing, \
+ "Iallocator did return incomplete result: %s" % \
+ utils.CommaJoin(missing)
+ else:
+ jobs.extend([op] for op in self.op.instances)
return ResultWithJobs(jobs, **self._ConstructPartialResult())
self.filled = None
-def PrepareContainerMods(mods, private_fn):
+def _PrepareContainerMods(mods, private_fn):
"""Prepares a list of container modifications by adding a private data field.
@type mods: list of tuples; (operation, index, parameters)
(kind, identifier), errors.ECODE_NOENT)
-def ApplyContainerMods(kind, container, chgdesc, mods,
- create_fn, modify_fn, remove_fn):
+def _ApplyContainerMods(kind, container, chgdesc, mods,
+ create_fn, modify_fn, remove_fn):
"""Applies descriptions in C{mods} to C{container}.
@type kind: string
@type chgdesc: None or list
@param chgdesc: List of applied changes
@type mods: list
- @param mods: Modifications as returned by L{PrepareContainerMods}
+ @param mods: Modifications as returned by L{_PrepareContainerMods}
@type create_fn: callable
@param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
receives absolute item index, parameters and private data object as added
- by L{PrepareContainerMods}, returns tuple containing new item and changes
+ by L{_PrepareContainerMods}, returns tuple containing new item and changes
as list
@type modify_fn: callable
@param modify_fn: Callback for modifying an existing item
(L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
- and private data object as added by L{PrepareContainerMods}, returns
+ and private data object as added by L{_PrepareContainerMods}, returns
changes as list
@type remove_fn: callable
@param remove_fn: Callback on removing item; receives absolute item index,
- item and private data object as added by L{PrepareContainerMods}
+ item and private data object as added by L{_PrepareContainerMods}
"""
for (op, identifier, params, private) in mods:
else:
raise errors.ProgrammerError("Unhandled operation '%s'" % op)
- @staticmethod
- def _VerifyDiskModification(op, params):
+ def _VerifyDiskModification(self, op, params):
"""Verifies a disk modification.
"""
if constants.IDISK_SIZE in params:
raise errors.OpPrereqError("Disk size change not possible, use"
" grow-disk", errors.ECODE_INVAL)
- if len(params) > 2:
- raise errors.OpPrereqError("Disk modification doesn't support"
- " additional arbitrary parameters",
- errors.ECODE_INVAL)
+
+ # Disk modification supports changing only the disk name and mode.
+    # Changing arbitrary parameters is allowed only for the ext disk
+    # template.
+ if self.instance.disk_template != constants.DT_EXT:
+ utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
+
name = params.get(constants.IDISK_NAME, None)
if name is not None and name.lower() == constants.VALUE_NONE:
params[constants.IDISK_NAME] = None
def CheckArguments(self):
if not (self.op.nics or self.op.disks or self.op.disk_template or
self.op.hvparams or self.op.beparams or self.op.os_name or
- self.op.offline is not None or self.op.runtime_mem or
- self.op.pnode):
+ self.op.osparams or self.op.offline is not None or
+ self.op.runtime_mem or self.op.pnode):
raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
if self.op.hvparams:
- _CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
- "hypervisor", "instance", "cluster")
+ CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
+ "hypervisor", "instance", "cluster")
self.op.disks = self._UpgradeDiskNicMods(
"disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
self._VerifyNicModification)
if self.op.pnode:
- self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
+ self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
def ExpandNames(self):
self._ExpandAndLockInstance()
elif level == locking.LEVEL_NODE:
self._LockInstancesNodes()
if self.op.disk_template and self.op.remote_node:
- self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)
+ self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
# Copy node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
- _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+ CopyLockList(self.needed_locks[locking.LEVEL_NODE])
def BuildHooksEnv(self):
"""Build hooks env.
n = copy.deepcopy(nic)
nicparams = self.cluster.SimpleFillNIC(n.nicparams)
n.nicparams = nicparams
- nics.append(_NICToTuple(self, n))
+ nics.append(NICToTuple(self, n))
args["nics"] = nics
- env = _BuildInstanceHookEnvByObject(self, self.instance, override=args)
+ env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
if self.op.disk_template:
env["NEW_DISK_TEMPLATE"] = self.op.disk_template
if self.op.runtime_mem:
new_net_obj.name, errors.ECODE_INVAL)
new_params = dict(netparams)
else:
- new_params = _GetUpdatedParams(old_params, update_params_dict)
+ new_params = GetUpdatedParams(old_params, update_params_dict)
utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
" %s to %s" % (instance.disk_template,
self.op.disk_template),
errors.ECODE_INVAL)
- _CheckInstanceState(self, instance, INSTANCE_DOWN,
- msg="cannot change disk template")
+ CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot change disk template")
if self.op.disk_template in constants.DTS_INT_MIRROR:
if self.op.remote_node == pnode:
raise errors.OpPrereqError("Given new secondary node %s is the same"
" as the primary node of the instance" %
self.op.remote_node, errors.ECODE_STATE)
- _CheckNodeOnline(self, self.op.remote_node)
- _CheckNodeNotDrained(self, self.op.remote_node)
+ CheckNodeOnline(self, self.op.remote_node)
+ CheckNodeNotDrained(self, self.op.remote_node)
# FIXME: here we assume that the old instance type is DT_PLAIN
assert instance.disk_template == constants.DT_PLAIN
disks = [{constants.IDISK_SIZE: d.size,
constants.IDISK_VG: d.logical_id[0]}
for d in instance.disks]
- required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
- _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+ required = ComputeDiskSizePerVG(self.op.disk_template, disks)
+ CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
snode_group = self.cfg.GetNodeGroup(snode_info.group)
ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
snode_group)
- _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
- ignore=self.op.ignore_ipolicy)
+ CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
+ ignore=self.op.ignore_ipolicy)
if pnode_info.group != snode_info.group:
self.LogWarning("The primary and secondary nodes are in two"
" different node groups; the disk parameters"
if self.op.disk_template in constants.DTS_INT_MIRROR:
assert snode_info
nodes.append(snode_info)
- has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+ has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
if compat.any(map(has_es, nodes)):
errmsg = ("Cannot convert disk template from %s to %s when exclusive"
" storage is enabled" % (instance.disk_template,
self._VerifyDiskModification)
# Prepare disk/NIC modifications
- self.diskmod = PrepareContainerMods(self.op.disks, None)
- self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+ self.diskmod = _PrepareContainerMods(self.op.disks, None)
+ self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
# Check the validity of the `provider' parameter
if instance.disk_template in constants.DT_EXT:
# OS change
if self.op.os_name and not self.op.force:
- _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
- self.op.force_variant)
+ CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
+ self.op.force_variant)
instance_os = self.op.os_name
else:
instance_os = instance.os
# hvparams processing
if self.op.hvparams:
hv_type = instance.hypervisor
- i_hvdict = _GetUpdatedParams(instance.hvparams, self.op.hvparams)
+ i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
# local check
hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
- _CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
+ CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
self.hv_proposed = self.hv_new = hv_new # the new actual values
self.hv_inst = i_hvdict # the new dict (without defaults)
else:
# beparams processing
if self.op.beparams:
- i_bedict = _GetUpdatedParams(instance.beparams, self.op.beparams,
- use_none=True)
+ i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
+ use_none=True)
objects.UpgradeBeParams(i_bedict)
utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
be_new = cluster.SimpleFillBE(i_bedict)
# osparams processing
if self.op.osparams:
- i_osdict = _GetUpdatedParams(instance.osparams, self.op.osparams)
- _CheckOSParams(self, True, nodelist, instance_os, i_osdict)
+ i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
+ CheckOSParams(self, True, nodelist, instance_os, i_osdict)
self.os_inst = i_osdict # the new dict (without defaults)
else:
self.os_inst = {}
delta = self.op.runtime_mem - current_memory
if delta > 0:
- _CheckNodeFreeMemory(self, instance.primary_node,
- "ballooning memory for instance %s" %
- instance.name, delta, instance.hypervisor)
+ CheckNodeFreeMemory(self, instance.primary_node,
+ "ballooning memory for instance %s" %
+ instance.name, delta, instance.hypervisor)
if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
raise errors.OpPrereqError("Disk operations not supported for"
# Verify NIC changes (operating on copy)
nics = instance.nics[:]
- ApplyContainerMods("NIC", nics, None, self.nicmod,
- _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
+ _ApplyContainerMods("NIC", nics, None, self.nicmod,
+ _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
if len(nics) > constants.MAX_NICS:
raise errors.OpPrereqError("Instance has too many network interfaces"
" (%d), cannot add more" % constants.MAX_NICS,
# Verify disk changes (operating on a copy)
disks = copy.deepcopy(instance.disks)
- ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
- None)
+ _ApplyContainerMods("disk", disks, None, self.diskmod, None,
+ _PrepareDiskMod, None)
utils.ValidateDeviceNames("disk", disks)
if len(disks) > constants.MAX_DISKS:
raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
if self.op.offline is not None and self.op.offline:
- _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
- msg="can't change to offline")
+ CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+ msg="can't change to offline")
# Pre-compute NIC changes (necessary to use result in hooks)
self._nic_chgdesc = []
if self.nicmod:
# Operate on copies as this is still in prereq
nics = [nic.Copy() for nic in instance.nics]
- ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
- self._CreateNewNic, self._ApplyNicMods, None)
+ _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
+ self._CreateNewNic, self._ApplyNicMods,
+ self._RemoveNic)
# Verify that NIC names are unique and valid
utils.ValidateDeviceNames("NIC", nics)
self._new_nics = nics
constants.IDISK_VG: d.logical_id[0],
constants.IDISK_NAME: d.name}
for d in instance.disks]
- new_disks = _GenerateDiskTemplate(self, self.op.disk_template,
- instance.name, pnode, [snode],
- disk_info, None, None, 0, feedback_fn,
- self.diskparams)
+ new_disks = GenerateDiskTemplate(self, self.op.disk_template,
+ instance.name, pnode, [snode],
+ disk_info, None, None, 0, feedback_fn,
+ self.diskparams)
anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
self.diskparams)
- p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
- s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
- info = _GetInstanceInfoText(instance)
+ p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
+ s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
+ info = GetInstanceInfoText(instance)
feedback_fn("Creating additional volumes...")
# first, create the missing data and meta devices
for disk in anno_disks:
# unfortunately this is... not too nice
- _CreateSingleBlockDev(self, pnode, instance, disk.children[1],
- info, True, p_excl_stor)
+ CreateSingleBlockDev(self, pnode, instance, disk.children[1],
+ info, True, p_excl_stor)
for child in disk.children:
- _CreateSingleBlockDev(self, snode, instance, child, info, True,
- s_excl_stor)
+ CreateSingleBlockDev(self, snode, instance, child, info, True,
+ s_excl_stor)
# at this stage, all new LVs have been created, we can rename the
# old ones
feedback_fn("Renaming original volumes...")
for disk in anno_disks:
for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
f_create = node == pnode
- _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
- excl_stor)
+ CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+ excl_stor)
except errors.GenericError, e:
feedback_fn("Initializing of DRBD devices failed;"
" renaming back original volumes...")
self.cfg.Update(instance, feedback_fn)
# Release node locks while waiting for sync
- _ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE)
# disks are created, waiting for sync
- disk_abort = not _WaitForSync(self, instance,
- oneshot=not self.op.wait_for_sync)
+ disk_abort = not WaitForSync(self, instance,
+ oneshot=not self.op.wait_for_sync)
if disk_abort:
raise errors.OpExecError("There are some degraded disks for"
" this instance, please cleanup manually")
snode = instance.secondary_nodes[0]
feedback_fn("Converting template to plain")
- old_disks = _AnnotateDiskParams(instance, instance.disks, self.cfg)
+ old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
new_disks = [d.children[0] for d in instance.disks]
# copy over size, mode and name
self.cfg.Update(instance, feedback_fn)
# Release locks in case removing disks takes a while
- _ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE)
feedback_fn("Removing volumes on the secondary node...")
for disk in old_disks:
self.LogWarning("Could not remove metadata for disk %d on node %s,"
" continuing anyway: %s", idx, pnode, msg)
+ def _HotplugDevice(self, action, dev_type, device, extra, seq):
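+ """Performs a hotplug RPC call on the instance's primary node.
+
+ Best effort: a failure is only logged as a warning and execution
+ continues, so the configuration change is applied regardless.
+
+ """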
+ self.LogInfo("Trying to hotplug device...")
+ result = self.rpc.call_hotplug_device(self.instance.primary_node,
+ self.instance, action, dev_type,
+ device, extra, seq)
+ if result.fail_msg:
+ self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
+ self.LogInfo("Continuing execution..")
+ else:
+ self.LogInfo("Hotplug done.")
+
def _CreateNewDisk(self, idx, params, _):
"""Creates a new disk.
file_driver = file_path = None
disk = \
- _GenerateDiskTemplate(self, instance.disk_template, instance.name,
- instance.primary_node, instance.secondary_nodes,
- [params], file_path, file_driver, idx,
- self.Log, self.diskparams)[0]
-
- info = _GetInstanceInfoText(instance)
-
- logging.info("Creating volume %s for instance %s",
- disk.iv_name, instance.name)
- # Note: this needs to be kept in sync with _CreateDisks
- #HARDCODE
- for node in instance.all_nodes:
- f_create = (node == instance.primary_node)
- try:
- _CreateBlockDev(self, node, instance, disk, f_create, info, f_create)
- except errors.OpExecError, err:
- self.LogWarning("Failed to create volume %s (%s) on node '%s': %s",
- disk.iv_name, disk, node, err)
+ GenerateDiskTemplate(self, instance.disk_template, instance.name,
+ instance.primary_node, instance.secondary_nodes,
+ [params], file_path, file_driver, idx,
+ self.Log, self.diskparams)[0]
+
+ new_disks = CreateDisks(self, instance, disks=[disk])
if self.cluster.prealloc_wipe_disks:
# Wipe new disk
- _WipeDisks(self, instance,
- disks=[(idx, disk, 0)])
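+ # pass the CreateDisks result so that a failed wipe can clean up
+ # the freshly created disks again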
+ WipeOrCleanupDisks(self, instance,
+ disks=[(idx, disk, 0)],
+ cleanup=new_disks)
+
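+ # to hotplug the new disk it must first be assembled on the primary
+ # node; the link name returned by the assemble call is handed to
+ # the hotplug RPC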
+ if self.op.hotplug:
+ self.cfg.SetDiskID(disk, self.instance.primary_node)
+ result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
+ (disk, self.instance),
+ self.instance.name, True, idx)
+ if result.fail_msg:
+ self.LogWarning("Can't assemble newly created disk %d: %s",
+ idx, result.fail_msg)
+ else:
+ _, link_name = result.payload
+ self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+ constants.HOTPLUG_TARGET_DISK,
+ disk, link_name, idx)
return (disk, [
("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
])
- @staticmethod
- def _ModifyDisk(idx, disk, params, _):
+ def _ModifyDisk(self, idx, disk, params, _):
"""Modifies a disk.
"""
changes = []
- mode = params.get(constants.IDISK_MODE, None)
- if mode:
- disk.mode = mode
+ if constants.IDISK_MODE in params:
+ disk.mode = params.get(constants.IDISK_MODE)
changes.append(("disk.mode/%d" % idx, disk.mode))
- name = params.get(constants.IDISK_NAME, None)
- disk.name = name
- changes.append(("disk.name/%d" % idx, disk.name))
+ if constants.IDISK_NAME in params:
+ disk.name = params.get(constants.IDISK_NAME)
+ changes.append(("disk.name/%d" % idx, disk.name))
+
+ # Modify arbitrary parameters when the instance's disk template is ext
+ for key, value in params.iteritems():
+ if (key not in constants.MODIFIABLE_IDISK_PARAMS and
+ self.instance.disk_template == constants.DT_EXT):
+ # stolen from GetUpdatedParams: default means reset/delete
+ if value.lower() == constants.VALUE_DEFAULT:
+ try:
+ del disk.params[key]
+ except KeyError:
+ pass
+ else:
+ disk.params[key] = value
+ changes.append(("disk.params:%s/%d" % (key, idx), value))
return changes
"""Removes a disk.
"""
- (anno_disk,) = _AnnotateDiskParams(self.instance, [root], self.cfg)
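+ # when hotplugging, unplug the disk from the running instance and
+ # shut down its block device before removing it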
+ if self.op.hotplug:
+ self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
+ constants.HOTPLUG_TARGET_DISK,
+ root, None, idx)
+ ShutdownInstanceDisks(self, self.instance, [root])
+
+ (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
self.cfg.SetDiskID(disk, node)
msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
nicparams=nicparams)
nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
- return (nobj, [
+ if self.op.hotplug:
+ self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
+ constants.HOTPLUG_TARGET_NIC,
+ nobj, None, idx)
+
+ desc = [
("nic.%d" % idx,
"add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
(mac, ip, private.filled[constants.NIC_MODE],
- private.filled[constants.NIC_LINK],
- net)),
- ])
+ private.filled[constants.NIC_LINK], net)),
+ ]
+
+ return (nobj, desc)
def _ApplyNicMods(self, idx, nic, params, private):
"""Modifies a network interface.
for (key, val) in nic.nicparams.items():
changes.append(("nic.%s/%d" % (key, idx), val))
+ if self.op.hotplug:
+ self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
+ constants.HOTPLUG_TARGET_NIC,
+ nic, None, idx)
+
return changes
+ def _RemoveNic(self, idx, nic, _):
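+ """Hot-unplugs a NIC; its removal from the configuration is
+ handled by L{_ApplyContainerMods}.
+
+ """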
+ if self.op.hotplug:
+ self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
+ constants.HOTPLUG_TARGET_NIC,
+ nic, None, idx)
+
def Exec(self, feedback_fn):
"""Modifies an instance.
result.append(("runtime_memory", self.op.runtime_mem))
# Apply disk changes
- ApplyContainerMods("disk", instance.disks, result, self.diskmod,
- self._CreateNewDisk, self._ModifyDisk, self._RemoveDisk)
+ _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
+ self._CreateNewDisk, self._ModifyDisk,
+ self._RemoveDisk)
_UpdateIvNames(0, instance.disks)
if self.op.disk_template:
("Not owning the correct locks, owning %r, expected at least %r" %
(owned, check_nodes))
- r_shut = _ShutdownInstanceDisks(self, instance)
+ r_shut = ShutdownInstanceDisks(self, instance)
if not r_shut:
raise errors.OpExecError("Cannot shutdown instance disks, unable to"
" proceed with disk template conversion")
# Release node and resource locks if there are any (they might already have
# been released during disk conversion)
- _ReleaseLocks(self, locking.LEVEL_NODE)
- _ReleaseLocks(self, locking.LEVEL_NODE_RES)
+ ReleaseLocks(self, locking.LEVEL_NODE)
+ ReleaseLocks(self, locking.LEVEL_NODE_RES)
# Apply NIC changes
if self._new_nics is not None:
REQ_BGL = False
def ExpandNames(self):
- self.share_locks = _ShareAll()
+ self.share_locks = ShareAll()
self.needed_locks = {
locking.LEVEL_NODEGROUP: [],
else:
self.req_target_uuids = None
- self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)
+ self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
def DeclareLocks(self, level):
if level == locking.LEVEL_NODEGROUP:
("Instance %s's nodes changed while we kept the lock" %
self.op.instance_name)
- inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
- owned_groups)
+ inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
+ owned_groups)
if self.req_target_uuids:
# User requested specific target groups
"TARGET_GROUPS": " ".join(self.target_uuids),
}
- env.update(_BuildInstanceHookEnvByObject(self, self.instance))
+ env.update(BuildInstanceHookEnvByObject(self, self.instance))
return env
(self.op.instance_name, self.op.iallocator,
ial.info), errors.ECODE_NORES)
- jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
+ jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
self.LogInfo("Iallocator returned %s job(s) for changing group of"
" instance '%s'", len(jobs), self.op.instance_name)