from ganeti import objects
from ganeti import pathutils
from ganeti import utils
-from ganeti.cmdlib.common import _AnnotateDiskParams, \
- _ComputeIPolicyInstanceViolation
+from ganeti.cmdlib.common import AnnotateDiskParams, \
+ ComputeIPolicyInstanceViolation
-def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
- minmem, maxmem, vcpus, nics, disk_template, disks,
- bep, hvp, hypervisor_name, tags):
+def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
+ status, minmem, maxmem, vcpus, nics, disk_template,
+ disks, bep, hvp, hypervisor_name, tags):
"""Builds instance related env variables for hooks
This builds the hook environment from individual variables.
@type name: string
@param name: the name of the instance
- @type primary_node: string
- @param primary_node: the name of the instance's primary node
- @type secondary_nodes: list
- @param secondary_nodes: list of secondary nodes as strings
+ @type primary_node_name: string
+ @param primary_node_name: the name of the instance's primary node
+ @type secondary_node_names: list
+ @param secondary_node_names: list of secondary nodes as strings
@type os_type: string
@param os_type: the name of the instance's OS
@type status: string
env = {
"OP_TARGET": name,
"INSTANCE_NAME": name,
- "INSTANCE_PRIMARY": primary_node,
- "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
+ "INSTANCE_PRIMARY": primary_node_name,
+ "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
"INSTANCE_OS_TYPE": os_type,
"INSTANCE_STATUS": status,
"INSTANCE_MINMEM": minmem,
}
if nics:
nic_count = len(nics)
- for idx, (name, _, ip, mac, mode, link, net, netinfo) in enumerate(nics):
+ for idx, (name, uuid, ip, mac, mode, link, net, netinfo) in enumerate(nics):
if ip is None:
ip = ""
- env["INSTANCE_NIC%d_NAME" % idx] = name
+ if name:
+ env["INSTANCE_NIC%d_NAME" % idx] = name
+ env["INSTANCE_NIC%d_UUID" % idx] = uuid
env["INSTANCE_NIC%d_IP" % idx] = ip
env["INSTANCE_NIC%d_MAC" % idx] = mac
env["INSTANCE_NIC%d_MODE" % idx] = mode
if disks:
disk_count = len(disks)
- for idx, (name, size, mode) in enumerate(disks):
- env["INSTANCE_DISK%d_NAME" % idx] = name
+ for idx, (name, uuid, size, mode) in enumerate(disks):
+ if name:
+ env["INSTANCE_DISK%d_NAME" % idx] = name
+ env["INSTANCE_DISK%d_UUID" % idx] = uuid
env["INSTANCE_DISK%d_SIZE" % idx] = size
env["INSTANCE_DISK%d_MODE" % idx] = mode
else:
return env
-def _BuildInstanceHookEnvByObject(lu, instance, override=None):
+def BuildInstanceHookEnvByObject(lu, instance, override=None):
"""Builds instance related env variables for hooks from an object.
@type lu: L{LogicalUnit}
hvp = cluster.FillHV(instance)
args = {
"name": instance.name,
- "primary_node": instance.primary_node,
- "secondary_nodes": instance.secondary_nodes,
+ "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
+ "secondary_node_names": lu.cfg.GetNodeNames(instance.secondary_nodes),
"os_type": instance.os,
"status": instance.admin_state,
"maxmem": bep[constants.BE_MAXMEM],
"minmem": bep[constants.BE_MINMEM],
"vcpus": bep[constants.BE_VCPUS],
- "nics": _NICListToTuple(lu, instance.nics),
+ "nics": NICListToTuple(lu, instance.nics),
"disk_template": instance.disk_template,
- "disks": [(disk.name, disk.size, disk.mode)
+ "disks": [(disk.name, disk.uuid, disk.size, disk.mode)
for disk in instance.disks],
"bep": bep,
"hvp": hvp,
}
if override:
args.update(override)
- return _BuildInstanceHookEnv(**args) # pylint: disable=W0142
+ return BuildInstanceHookEnv(**args) # pylint: disable=W0142
-def _GetClusterDomainSecret():
+def GetClusterDomainSecret():
"""Reads the cluster domain secret.
"""
strict=True)
-def _CheckNodeNotDrained(lu, node):
+def CheckNodeNotDrained(lu, node_uuid):
"""Ensure that a given node is not drained.
@param lu: the LU on behalf of which we make the check
- @param node: the node to check
+ @param node_uuid: the node to check
@raise errors.OpPrereqError: if the node is drained
"""
- if lu.cfg.GetNodeInfo(node).drained:
- raise errors.OpPrereqError("Can't use drained node %s" % node,
+ if lu.cfg.GetNodeInfo(node_uuid).drained:
+ raise errors.OpPrereqError("Can't use drained node %s" % node_uuid,
errors.ECODE_STATE)
-def _CheckNodeVmCapable(lu, node):
+def CheckNodeVmCapable(lu, node_uuid):
"""Ensure that a given node is vm capable.
@param lu: the LU on behalf of which we make the check
- @param node: the node to check
+ @param node_uuid: the node to check
@raise errors.OpPrereqError: if the node is not vm capable
"""
- if not lu.cfg.GetNodeInfo(node).vm_capable:
- raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
+ if not lu.cfg.GetNodeInfo(node_uuid).vm_capable:
+ raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
errors.ECODE_STATE)
-def _RemoveInstance(lu, feedback_fn, instance, ignore_failures):
+def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
"""Utility function to remove an instance.
"""
logging.info("Removing block devices for instance %s", instance.name)
- if not _RemoveDisks(lu, instance, ignore_failures=ignore_failures):
+ if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
if not ignore_failures:
raise errors.OpExecError("Can't remove instance's disks")
feedback_fn("Warning: can't remove instance's disks")
logging.info("Removing instance %s out of cluster config", instance.name)
- lu.cfg.RemoveInstance(instance.name)
+ lu.cfg.RemoveInstance(instance.uuid)
assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
"Instance lock removal conflict"
lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name
-def _RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
+def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
"""Remove all disks for an instance.
This abstracts away some work from `AddInstance()` and
@param lu: the logical unit on whose behalf we execute
@type instance: L{objects.Instance}
@param instance: the instance whose disks we should remove
- @type target_node: string
- @param target_node: used to override the node on which to remove the disks
+ @type target_node_uuid: string
+ @param target_node_uuid: used to override the node on which to remove the
+ disks
@rtype: boolean
@return: the success of the removal
all_result = True
ports_to_release = set()
- anno_disks = _AnnotateDiskParams(instance, instance.disks, lu.cfg)
+ anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
for (idx, device) in enumerate(anno_disks):
- if target_node:
- edata = [(target_node, device)]
+ if target_node_uuid:
+ edata = [(target_node_uuid, device)]
else:
edata = device.ComputeNodeTree(instance.primary_node)
- for node, disk in edata:
- lu.cfg.SetDiskID(disk, node)
- result = lu.rpc.call_blockdev_remove(node, disk)
+ for node_uuid, disk in edata:
+ lu.cfg.SetDiskID(disk, node_uuid)
+ result = lu.rpc.call_blockdev_remove(node_uuid, disk)
if result.fail_msg:
lu.LogWarning("Could not remove disk %s on node %s,"
- " continuing anyway: %s", idx, node, result.fail_msg)
- if not (result.offline and node != instance.primary_node):
+ " continuing anyway: %s", idx,
+ lu.cfg.GetNodeName(node_uuid), result.fail_msg)
+ if not (result.offline and node_uuid != instance.primary_node):
all_result = False
# if this is a DRBD disk, return its port to the pool
if instance.disk_template in constants.DTS_FILEBASED:
file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
- if target_node:
- tgt = target_node
+ if target_node_uuid:
+ tgt = target_node_uuid
else:
tgt = instance.primary_node
result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
if result.fail_msg:
lu.LogWarning("Could not remove directory '%s' on node %s: %s",
- file_storage_dir, instance.primary_node, result.fail_msg)
+ file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
all_result = False
return all_result
-def _NICToTuple(lu, nic):
+def NICToTuple(lu, nic):
"""Build a tupple of nic information.
@type lu: L{LogicalUnit}
return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)
-def _NICListToTuple(lu, nics):
+def NICListToTuple(lu, nics):
"""Build a list of nic information tuples.
-  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
+  This list is suitable to be passed to BuildInstanceHookEnv or as a return
"""
hooks_nics = []
for nic in nics:
- hooks_nics.append(_NICToTuple(lu, nic))
+ hooks_nics.append(NICToTuple(lu, nic))
return hooks_nics
-def _CopyLockList(names):
+def CopyLockList(names):
"""Makes a copy of a list of lock names.
Handles L{locking.ALL_SET} correctly.
return names[:]
-def _ReleaseLocks(lu, level, names=None, keep=None):
+def ReleaseLocks(lu, level, names=None, keep=None):
"""Releases locks owned by an LU.
@type lu: L{LogicalUnit}
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
target_group, cfg,
- _compute_fn=_ComputeIPolicyInstanceViolation):
+ _compute_fn=ComputeIPolicyInstanceViolation):
"""Compute if instance meets the specs of the new target group.
@param ipolicy: The ipolicy to verify
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@param _compute_fn: The function to verify ipolicy (unittest only)
- @see: L{ganeti.cmdlib.common._ComputeIPolicySpecViolation}
+ @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}
"""
if current_group == target_group:
return _compute_fn(ipolicy, instance, cfg)
-def _CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
- _compute_fn=_ComputeIPolicyNodeViolation):
+def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
+ _compute_fn=_ComputeIPolicyNodeViolation):
"""Checks that the target node is correct in terms of instance policy.
@param ipolicy: The ipolicy to verify
@param cfg: Cluster configuration
@param ignore: Ignore violations of the ipolicy
@param _compute_fn: The function to verify ipolicy (unittest only)
- @see: L{ganeti.cmdlib.common._ComputeIPolicySpecViolation}
+ @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}
"""
primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
-def _GetInstanceInfoText(instance):
+def GetInstanceInfoText(instance):
"""Compute that text that should be added to the disk's metadata.
"""
return "originstname+%s" % instance.name
-def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
+def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
"""Checks if a node has enough free memory.
This function checks if a given node has the needed amount of free
@type lu: C{LogicalUnit}
@param lu: a logical unit from which we get configuration data
- @type node: C{str}
- @param node: the node to check
+ @type node_uuid: C{str}
+ @param node_uuid: the node to check
@type reason: C{str}
@param reason: string to use in the error message
@type requested: C{int}
@param requested: the amount of memory in MiB to check for
- @type hypervisor_name: C{str}
- @param hypervisor_name: the hypervisor to ask for memory stats
+ @type hvname: C{str}
+ @param hvname: the hypervisor's name
+ @type hvparams: C{dict} of strings
+ @param hvparams: the hypervisor's parameters
@rtype: integer
@return: node current free memory
@raise errors.OpPrereqError: if the node doesn't have enough memory, or
we cannot check the node
"""
- nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
- nodeinfo[node].Raise("Can't get data from node %s" % node,
- prereq=True, ecode=errors.ECODE_ENVIRON)
- (_, _, (hv_info, )) = nodeinfo[node].payload
+ node_name = lu.cfg.GetNodeName(node_uuid)
+ nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
+ nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
+ prereq=True, ecode=errors.ECODE_ENVIRON)
+ (_, _, (hv_info, )) = nodeinfo[node_uuid].payload
free_mem = hv_info.get("memory_free", None)
if not isinstance(free_mem, int):
raise errors.OpPrereqError("Can't compute free memory on node %s, result"
- " was '%s'" % (node, free_mem),
+ " was '%s'" % (node_name, free_mem),
errors.ECODE_ENVIRON)
if requested > free_mem:
raise errors.OpPrereqError("Not enough memory on node %s for %s:"
" needed %s MiB, available %s MiB" %
- (node, reason, requested, free_mem),
+ (node_name, reason, requested, free_mem),
errors.ECODE_NORES)
return free_mem
-def _CheckInstanceBridgesExist(lu, instance, node=None):
+def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
"""Check that the brigdes needed by an instance exist.
"""
- if node is None:
- node = instance.primary_node
- _CheckNicsBridgesExist(lu, instance.nics, node)
+ if node_uuid is None:
+ node_uuid = instance.primary_node
+ CheckNicsBridgesExist(lu, instance.nics, node_uuid)
-def _CheckNicsBridgesExist(lu, target_nics, target_node):
+def CheckNicsBridgesExist(lu, nics, node_uuid):
"""Check that the brigdes needed by a list of nics exist.
"""
cluster = lu.cfg.GetClusterInfo()
- paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
+ paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in nics]
brlist = [params[constants.NIC_LINK] for params in paramslist
if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
if brlist:
- result = lu.rpc.call_bridges_exist(target_node, brlist)
+ result = lu.rpc.call_bridges_exist(node_uuid, brlist)
result.Raise("Error checking bridges on destination node '%s'" %
- target_node, prereq=True, ecode=errors.ECODE_ENVIRON)
+ lu.cfg.GetNodeName(node_uuid), prereq=True,
+ ecode=errors.ECODE_ENVIRON)
+
+
+def CheckNodeHasOS(lu, node_uuid, os_name, force_variant):
+ """Ensure that a node supports a given OS.
+
+ @param lu: the LU on behalf of which we make the check
+ @param node_uuid: the node to check
+ @param os_name: the OS to query about
+ @param force_variant: whether to ignore variant errors
+ @raise errors.OpPrereqError: if the node is not supporting the OS
+
+ """
+ result = lu.rpc.call_os_get(node_uuid, os_name)
+ result.Raise("OS '%s' not in supported OS list for node %s" %
+ (os_name, lu.cfg.GetNodeName(node_uuid)),
+ prereq=True, ecode=errors.ECODE_INVAL)
+ if not force_variant:
+ _CheckOSVariant(result.payload, os_name)
+
+
+def _CheckOSVariant(os_obj, name):
+ """Check whether an OS name conforms to the os variants specification.
+
+ @type os_obj: L{objects.OS}
+ @param os_obj: OS object to check
+ @type name: string
+ @param name: OS name passed by the user, to check for validity
+
+ """
+ variant = objects.OS.GetVariant(name)
+ if not os_obj.supported_variants:
+ if variant:
+ raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
+ " passed)" % (os_obj.name, variant),
+ errors.ECODE_INVAL)
+ return
+ if not variant:
+ raise errors.OpPrereqError("OS name must include a variant",
+ errors.ECODE_INVAL)
+
+ if variant not in os_obj.supported_variants:
+ raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)