self.owned_locks = context.glm.list_owned
self.context = context
self.rpc = rpc_runner
- # Dicts used to declare locking needs to mcpu
+
+ # Dictionaries used to declare locking needs to mcpu
self.needed_locks = None
self.share_locks = dict.fromkeys(locking.LEVELS, 0)
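+    # Per-level flags declaring whether locks at that level may be acquired
+    # opportunistically, i.e. without insisting on getting all of them (see
+    # _CheckOpportunisticLocking and the node_whitelist handling in
+    # LUInstanceCreate below); all levels default to False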
+ self.opportunistic_locks = dict.fromkeys(locking.LEVELS, False)
+
self.add_locks = {}
self.remove_locks = {}
+
# Used to force good behavior when calling helper functions
self.recalculate_locks = {}
+
# logging
self.Log = processor.Log # pylint: disable=C0103
self.LogWarning = processor.LogWarning # pylint: disable=C0103
return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
+def _IsExclusiveStorageEnabledNode(cfg, node):
+ """Whether exclusive_storage is in effect for the given node.
+
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: The cluster configuration
+ @type node: L{objects.Node}
+ @param node: The node
+ @rtype: bool
+ @return: The effective value of exclusive_storage
+
+ """
+ return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
+
+
+def _IsExclusiveStorageEnabledNodeName(cfg, nodename):
+ """Whether exclusive_storage is in effect for the given node.
+
+ @type cfg: L{config.ConfigWriter}
+ @param cfg: The cluster configuration
+ @type nodename: string
+  @param nodename: The node name
+ @rtype: bool
+ @return: The effective value of exclusive_storage
+ @raise errors.OpPrereqError: if no node exists with the given name
+
+ """
+ ni = cfg.GetNodeInfo(nodename)
+ if ni is None:
+ raise errors.OpPrereqError("Invalid node name %s" % nodename,
+ errors.ECODE_NOENT)
+ return _IsExclusiveStorageEnabledNode(cfg, ni)
+
+
def _CopyLockList(names):
"""Makes a copy of a list of lock names.
return env
-def _BuildNetworkHookEnvByObject(net):
- """Builds network related env varliables for hooks
-
- @type net: L{objects.Network}
- @param net: the network object
-
- """
- args = {
- "name": net.name,
- "subnet": net.network,
- "gateway": net.gateway,
- "network6": net.network6,
- "gateway6": net.gateway6,
- "network_type": net.network_type,
- "mac_prefix": net.mac_prefix,
- "tags": net.tags,
- }
-
- return _BuildNetworkHookEnv(**args) # pylint: disable=W0142
-
-
def _BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
minmem, maxmem, vcpus, nics, disk_template, disks,
bep, hvp, hypervisor_name, tags):
msg = ("hypervisor %s parameters syntax check (source %s): %%s" %
(item, hv_name))
try:
- hv_class = hypervisor.GetHypervisor(hv_name)
+ hv_class = hypervisor.GetHypervisorClass(hv_name)
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class.CheckParameterSyntax(hv_params)
except errors.GenericError, err:
@ivar oslist: list of OSes as diagnosed by DiagnoseOS
@type vm_capable: boolean
@ivar vm_capable: whether the node can host instances
+ @type pv_min: float
+ @ivar pv_min: size in MiB of the smallest PVs
+ @type pv_max: float
+ @ivar pv_max: size in MiB of the biggest PVs
"""
def __init__(self, offline=False, name=None, vm_capable=True):
self.ghost = False
self.os_fail = False
self.oslist = {}
+ self.pv_min = None
+ self.pv_max = None
def ExpandNames(self):
# This raises errors.OpPrereqError on its own:
locking.LEVEL_INSTANCE: inst_names,
locking.LEVEL_NODEGROUP: [self.group_uuid],
locking.LEVEL_NODE: [],
+
+      # This opcode is run by the watcher every five minutes and acquires all
+      # nodes for a group. It doesn't run for a long time, so it's better to
+      # acquire the node allocation lock as well.
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
self.share_locks = _ShareAll()
"Node time diverges by at least %s from master node time",
ntime_diff)
- def _VerifyNodeLVM(self, ninfo, nresult, vg_name):
- """Check the node LVM results.
+ def _UpdateVerifyNodeLVM(self, ninfo, nresult, vg_name, nimg):
+ """Check the node LVM results and update info for cross-node checks.
@type ninfo: L{objects.Node}
@param ninfo: the node to check
@param nresult: the remote results for the node
@param vg_name: the configured VG name
+ @type nimg: L{NodeImage}
+ @param nimg: node image
"""
if vg_name is None:
constants.MIN_VG_SIZE)
_ErrorIf(vgstatus, constants.CV_ENODELVM, node, vgstatus)
- # check pv names
- pvlist = nresult.get(constants.NV_PVLIST, None)
- test = pvlist is None
+ # check pv names (and possibly sizes)
+ pvlist_dict = nresult.get(constants.NV_PVLIST, None)
+ test = pvlist_dict is None
_ErrorIf(test, constants.CV_ENODELVM, node, "Can't get PV list from node")
if not test:
+ pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
# check that ':' is not present in PV names, since it's a
# special character for lvcreate (denotes the range of PEs to
# use on the PV)
- for _, pvname, owner_vg in pvlist:
- test = ":" in pvname
+ for pv in pvlist:
+ test = ":" in pv.name
_ErrorIf(test, constants.CV_ENODELVM, node,
"Invalid character ':' in PV '%s' of VG '%s'",
- pvname, owner_vg)
+ pv.name, pv.vg_name)
+ if self._exclusive_storage:
+ (errmsgs, (pvmin, pvmax)) = utils.LvmExclusiveCheckNodePvs(pvlist)
+ for msg in errmsgs:
+ self._Error(constants.CV_ENODELVM, node, msg)
+ nimg.pv_min = pvmin
+ nimg.pv_max = pvmax
+
+ def _VerifyGroupLVM(self, node_image, vg_name):
+ """Check cross-node consistency in LVM.
+
+ @type node_image: dict
+    @param node_image: info about nodes, mapping from node names to
+ L{NodeImage} objects
+ @param vg_name: the configured VG name
+
+ """
+ if vg_name is None:
+ return
+
+    # Only exclusive storage needs this kind of check
+ if not self._exclusive_storage:
+ return
+
+    # exclusive_storage wants all PVs to have the same size (approximately);
+    # if the smallest and the biggest ones are okay, everything is fine.
+ # pv_min is None iff pv_max is None
+ vals = filter((lambda ni: ni.pv_min is not None), node_image.values())
+ if not vals:
+ return
+ (pvmin, minnode) = min((ni.pv_min, ni.name) for ni in vals)
+ (pvmax, maxnode) = max((ni.pv_max, ni.name) for ni in vals)
+ bad = utils.LvmExclusiveTestBadPvSizes(pvmin, pvmax)
+ self._ErrorIf(bad, constants.CV_EGROUPDIFFERENTPVSIZE, self.group_info.name,
+ "PV sizes differ too much in the group; smallest (%s MB) is"
+ " on %s, biggest (%s MB) is on %s",
+ pvmin, minnode, pvmax, maxnode)
def _VerifyNodeBridges(self, ninfo, nresult, bridges):
"""Check the node bridges.
len(s) == 2 for s in statuses)
for inst, nnames in instdisk.items()
for nname, statuses in nnames.items())
- assert set(instdisk) == set(instanceinfo), "instdisk consistency failure"
+ if __debug__:
+ instdisk_keys = set(instdisk)
+ instanceinfo_keys = set(instanceinfo)
+ assert instdisk_keys == instanceinfo_keys, \
+ ("instdisk keys (%s) do not match instanceinfo keys (%s)" %
+ (instdisk_keys, instanceinfo_keys))
return instdisk
nimg.sbp[pnode] = []
nimg.sbp[pnode].append(instance)
+ es_flags = rpc.GetExclusiveStorageForNodeNames(self.cfg, self.my_node_names)
+ es_unset_nodes = []
+ # The value of exclusive_storage should be the same across the group, so if
+    # it's True for at least one node, we act as if it were set for all nodes
+ self._exclusive_storage = compat.any(es_flags.values())
+ if self._exclusive_storage:
+ es_unset_nodes = [n for (n, es) in es_flags.items()
+ if not es]
+
+ if es_unset_nodes:
+ self._Error(constants.CV_EGROUPMIXEDESFLAG, self.group_info.name,
+ "The exclusive_storage flag should be uniform in a group,"
+ " but these nodes have it unset: %s",
+ utils.CommaJoin(utils.NiceSort(es_unset_nodes)))
+ self.LogWarning("Some checks required by exclusive storage will be"
+ " performed also on nodes with the flag unset")
+
# At this point, we have the in-memory data structures complete,
# except for the runtime information, which we'll gather next
node == master_node)
if nimg.vm_capable:
- self._VerifyNodeLVM(node_i, nresult, vg_name)
+ self._UpdateVerifyNodeLVM(node_i, nresult, vg_name, nimg)
self._VerifyNodeDrbd(node_i, nresult, self.all_inst_info, drbd_helper,
all_drbd_map)
_ErrorIf(not test, constants.CV_ENODEORPHANINSTANCE, node_i.name,
"node is running unknown instance %s", inst)
+ self._VerifyGroupLVM(node_image, vg_name)
+
for node, result in extra_lv_nvinfo.items():
self._UpdateNodeVolumes(self.all_node_info[node], result.payload,
node_image[node], vg_name)
locking.LEVEL_INSTANCE: [],
locking.LEVEL_NODEGROUP: [],
locking.LEVEL_NODE: [],
+
+      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
+ # starts one instance of this opcode for every group, which means all
+ # nodes will be locked for a short amount of time, so it's better to
+ # acquire the node allocation lock as well.
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
def DeclareLocks(self, level):
def ExpandNames(self):
if self.op.instances:
self.wanted_names = _GetWantedInstances(self, self.op.instances)
+ # Not getting the node allocation lock as only a specific set of
+ # instances (and their nodes) is going to be acquired
self.needed_locks = {
locking.LEVEL_NODE_RES: [],
locking.LEVEL_INSTANCE: self.wanted_names,
self.needed_locks = {
locking.LEVEL_NODE_RES: locking.ALL_SET,
locking.LEVEL_INSTANCE: locking.ALL_SET,
+
+      # This opcode acquires the node locks for all instances
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
+
self.share_locks = {
locking.LEVEL_NODE_RES: 1,
locking.LEVEL_INSTANCE: 0,
+ locking.LEVEL_NODE_ALLOC: 1,
}
def DeclareLocks(self, level):
def ExpandNames(self):
# FIXME: in the future maybe other cluster params won't require checking on
# all nodes to be modified.
+ # FIXME: This opcode changes cluster-wide settings. Is acquiring all
+ # resource locks the right thing, shouldn't it be the BGL instead?
self.needed_locks = {
locking.LEVEL_NODE: locking.ALL_SET,
locking.LEVEL_INSTANCE: locking.ALL_SET,
locking.LEVEL_NODEGROUP: locking.ALL_SET,
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
- self.share_locks = {
- locking.LEVEL_NODE: 1,
- locking.LEVEL_INSTANCE: 1,
- locking.LEVEL_NODEGROUP: 1,
- }
+ self.share_locks = _ShareAll()
def BuildHooksEnv(self):
"""Build hooks env.
self.new_os_hvp[os_name] = hvs
else:
for hv_name, hv_dict in hvs.items():
- if hv_name not in self.new_os_hvp[os_name]:
+ if hv_dict is None:
+ # Delete if it exists
+ self.new_os_hvp[os_name].pop(hv_name, None)
+ elif hv_name not in self.new_os_hvp[os_name]:
self.new_os_hvp[os_name][hv_name] = hv_dict
else:
self.new_os_hvp[os_name][hv_name].update(hv_dict)
(self.op.enabled_hypervisors and
hv_name in self.op.enabled_hypervisors)):
# either this is a new hypervisor, or its parameters have changed
- hv_class = hypervisor.GetHypervisor(hv_name)
+ hv_class = hypervisor.GetHypervisorClass(hv_name)
utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
hv_class.CheckParameterSyntax(hv_params)
_CheckHVParams(self, node_list, hv_name, hv_params)
# we need to fill in the new os_hvp on top of the actual hv_p
cluster_defaults = self.new_hvparams.get(hv_name, {})
new_osp = objects.FillDict(cluster_defaults, hv_params)
- hv_class = hypervisor.GetHypervisor(hv_name)
+ hv_class = hypervisor.GetHypervisorClass(hv_name)
hv_class.CheckParameterSyntax(new_osp)
_CheckHVParams(self, node_list, hv_name, new_osp)
files_vm = set(
filename
for hv_name in cluster.enabled_hypervisors
- for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[0])
+ for filename in
+ hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
files_opt |= set(
filename
for hv_name in cluster.enabled_hypervisors
- for filename in hypervisor.GetHypervisor(hv_name).GetAncillaryFiles()[1])
+ for filename in
+ hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
# Filenames in each category must be unique
all_files_set = files_all | files_mc | files_vm
def ExpandNames(self):
self.needed_locks = {
locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
- self.share_locks[locking.LEVEL_NODE] = 1
+ self.share_locks = _ShareAll()
def Exec(self, feedback_fn):
"""Redistribute the configuration.
locking.LEVEL_NODE: lock_names,
}
+ self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
+
+ if not self.op.node_names:
+ # Acquire node allocation lock only if all nodes are affected
+ self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
def CheckPrereq(self):
"""Check prerequisites.
return self.oq.OldStyleQuery(self)
+class _ExtStorageQuery(_QueryBase):
+ FIELDS = query.EXTSTORAGE_FIELDS
+
+ def ExpandNames(self, lu):
+ # Lock all nodes in shared mode
+ # Temporary removal of locks, should be reverted later
+ # TODO: reintroduce locks when they are lighter-weight
+ lu.needed_locks = {}
+ #self.share_locks[locking.LEVEL_NODE] = 1
+ #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+
+ # The following variables interact with _QueryBase._GetNames
+ if self.names:
+ self.wanted = self.names
+ else:
+ self.wanted = locking.ALL_SET
+
+ self.do_locking = self.use_locking
+
+ def DeclareLocks(self, lu, level):
+ pass
+
+ @staticmethod
+ def _DiagnoseByProvider(rlist):
+    """Remaps a per-node return list into a per-provider per-node dictionary
+
+ @param rlist: a map with node names as keys and ExtStorage objects as values
+
+ @rtype: dict
+ @return: a dictionary with extstorage providers as keys and as
+ value another map, with nodes as keys and tuples of
+ (path, status, diagnose, parameters) as values, eg::
+
+        {"provider1": {"node1": [(/usr/lib/..., True, "", [])],
+                       "node2": [(/srv/..., False, "missing file")],
+                       "node3": [(/srv/..., True, "", [])],
+                       }
+         }
+
+ """
+ all_es = {}
+ # we build here the list of nodes that didn't fail the RPC (at RPC
+ # level), so that nodes with a non-responding node daemon don't
+    # make all providers invalid
+ good_nodes = [node_name for node_name in rlist
+ if not rlist[node_name].fail_msg]
+ for node_name, nr in rlist.items():
+ if nr.fail_msg or not nr.payload:
+ continue
+ for (name, path, status, diagnose, params) in nr.payload:
+ if name not in all_es:
+          # build a list of nodes for this provider containing empty lists
+          # for each node in good_nodes
+ all_es[name] = {}
+ for nname in good_nodes:
+ all_es[name][nname] = []
+ # convert params from [name, help] to (name, help)
+ params = [tuple(v) for v in params]
+ all_es[name][node_name].append((path, status, diagnose, params))
+ return all_es
+
+ def _GetQueryData(self, lu):
+ """Computes the list of nodes and their attributes.
+
+ """
+ # Locking is not used
+ assert not (compat.any(lu.glm.is_owned(level)
+ for level in locking.LEVELS
+ if level != locking.LEVEL_CLUSTER) or
+ self.do_locking or self.use_locking)
+
+ valid_nodes = [node.name
+ for node in lu.cfg.GetAllNodesInfo().values()
+ if not node.offline and node.vm_capable]
+ pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))
+
+ data = {}
+
+ nodegroup_list = lu.cfg.GetNodeGroupList()
+
+ for (es_name, es_data) in pol.items():
+ # For every provider compute the nodegroup validity.
+ # To do this we need to check the validity of each node in es_data
+ # and then construct the corresponding nodegroup dict:
+ # { nodegroup1: status
+ # nodegroup2: status
+ # }
+ ndgrp_data = {}
+ for nodegroup in nodegroup_list:
+ ndgrp = lu.cfg.GetNodeGroup(nodegroup)
+
+ nodegroup_nodes = ndgrp.members
+ nodegroup_name = ndgrp.name
+ node_statuses = []
+
+ for node in nodegroup_nodes:
+ if node in valid_nodes:
+ if es_data[node] != []:
+ node_status = es_data[node][0][1]
+ node_statuses.append(node_status)
+ else:
+ node_statuses.append(False)
+
+ if False in node_statuses:
+ ndgrp_data[nodegroup_name] = False
+ else:
+ ndgrp_data[nodegroup_name] = True
+
+ # Compute the provider's parameters
+ parameters = set()
+ for idx, esl in enumerate(es_data.values()):
+ valid = bool(esl and esl[0][1])
+ if not valid:
+ break
+
+ node_params = esl[0][3]
+ if idx == 0:
+ # First entry
+ parameters.update(node_params)
+ else:
+ # Filter out inconsistent values
+ parameters.intersection_update(node_params)
+
+ params = list(parameters)
+
+ # Now fill all the info for this provider
+ info = query.ExtStorageInfo(name=es_name, node_status=es_data,
+ nodegroup_status=ndgrp_data,
+ parameters=params)
+
+ data[es_name] = info
+
+ # Prepare data in requested order
+ return [data[name] for name in self._GetNames(lu, pol.keys(), None)
+ if name in data]
+
+
+class LUExtStorageDiagnose(NoHooksLU):
+ """Logical unit for ExtStorage diagnose/query.
+
+ """
+ REQ_BGL = False
+
+ def CheckArguments(self):
+ self.eq = _ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
+ self.op.output_fields, False)
+
+ def ExpandNames(self):
+ self.eq.ExpandNames(self)
+
+ def Exec(self, feedback_fn):
+ return self.eq.OldStyleQuery(self)
+
+
class LUNodeRemove(LogicalUnit):
"""Logical unit for removing a node.
if self.do_locking:
# If any non-static field is requested we need to lock the nodes
lu.needed_locks[locking.LEVEL_NODE] = self.wanted
+ lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
def DeclareLocks(self, lu, level):
pass
# filter out non-vm_capable nodes
toquery_nodes = [name for name in nodenames if all_info[name].vm_capable]
+ es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, toquery_nodes)
node_data = lu.rpc.call_node_info(toquery_nodes, [lu.cfg.GetVGName()],
- [lu.cfg.GetHypervisorType()])
+ [lu.cfg.GetHypervisorType()], es_flags)
live_data = dict((name, rpc.MakeLegacyNodeInfo(nresult.payload))
for (name, nresult) in node_data.items()
if not nresult.fail_msg and nresult.payload)
def ExpandNames(self):
self.share_locks = _ShareAll()
- self.needed_locks = {}
- if not self.op.nodes:
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ if self.op.nodes:
+ self.needed_locks = {
+ locking.LEVEL_NODE: _GetWantedNodes(self, self.op.nodes),
+ }
else:
- self.needed_locks[locking.LEVEL_NODE] = \
- _GetWantedNodes(self, self.op.nodes)
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+ }
def Exec(self, feedback_fn):
"""Computes the list of nodes and their attributes.
def ExpandNames(self):
if self.lock_all:
- self.needed_locks = {locking.LEVEL_NODE: locking.ALL_SET}
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+
+ # Block allocations when all nodes are locked
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+ }
else:
- self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
+ self.needed_locks = {
+ locking.LEVEL_NODE: self.op.node_name,
+ }
# Since modifying a node can have severe effects on currently running
# operations the resource lock is at least acquired in shared mode
self.needed_locks[locking.LEVEL_NODE_RES] = \
self.needed_locks[locking.LEVEL_NODE]
- # Get node resource and instance locks in shared mode; they are not used
- # for anything but read-only access
- self.share_locks[locking.LEVEL_NODE_RES] = 1
- self.share_locks[locking.LEVEL_INSTANCE] = 1
+ # Get all locks except nodes in shared mode; they are not used for anything
+ # but read-only access
+ self.share_locks = _ShareAll()
+ self.share_locks[locking.LEVEL_NODE] = 0
+ self.share_locks[locking.LEVEL_NODE_RES] = 0
+ self.share_locks[locking.LEVEL_NODE_ALLOC] = 0
if self.lock_instances:
self.needed_locks[locking.LEVEL_INSTANCE] = \
drain_flag = NotImplemented
if query.CQ_WATCHER_PAUSE in self.requested_data:
- watcher_pause = utils.ReadWatcherPauseFile(pathutils.WATCHER_PAUSEFILE)
+ master_name = lu.cfg.GetMasterNode()
+
+ result = lu.rpc.call_get_watcher_pause(master_name)
+ result.Raise("Can't retrieve watcher pause from master node '%s'" %
+ master_name)
+
+ watcher_pause = result.payload
else:
watcher_pause = NotImplemented
def _CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
"""Checks if a node has enough free memory.
- This function check if a given node has the needed amount of free
+ This function checks if a given node has the needed amount of free
memory. In case the node has less memory or we cannot get the
- information from the node, this function raise an OpPrereqError
+ information from the node, this function raises an OpPrereqError
exception.
@type lu: C{LogicalUnit}
we cannot check the node
"""
- nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name])
+ nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
nodeinfo[node].Raise("Can't get data from node %s" % node,
prereq=True, ecode=errors.ECODE_ENVIRON)
(_, _, (hv_info, )) = nodeinfo[node].payload
def _CheckNodesFreeDiskPerVG(lu, nodenames, req_sizes):
- """Checks if nodes have enough free disk space in the all VGs.
+ """Checks if nodes have enough free disk space in all the VGs.
- This function check if all given nodes have the needed amount of
+ This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
- information from the node, this function raise an OpPrereqError
+ information from the node, this function raises an OpPrereqError
exception.
@type lu: C{LogicalUnit}
def _CheckNodesFreeDiskOnVG(lu, nodenames, vg, requested):
"""Checks if nodes have enough free disk space in the specified VG.
- This function check if all given nodes have the needed amount of
+ This function checks if all given nodes have the needed amount of
free disk. In case any node has less disk or we cannot get the
- information from the node, this function raise an OpPrereqError
+ information from the node, this function raises an OpPrereqError
exception.
@type lu: C{LogicalUnit}
or we cannot check the node
"""
- nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None)
+ es_flags = rpc.GetExclusiveStorageForNodeNames(lu.cfg, nodenames)
+ nodeinfo = lu.rpc.call_node_info(nodenames, [vg], None, es_flags)
for node in nodenames:
info = nodeinfo[node]
info.Raise("Cannot get current information from node %s" % node,
or we cannot check the node
"""
- nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name])
+ nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
for node in nodenames:
info = nodeinfo[node]
info.Raise("Cannot get current information from node %s" % node,
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
filled_hvp = cluster.FillHV(instance)
filled_hvp.update(self.op.hvparams)
- hv_type = hypervisor.GetHypervisor(instance.hypervisor)
+ hv_type = hypervisor.GetHypervisorClass(instance.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
_CheckHVParams(self, instance.all_nodes, instance.hypervisor, filled_hvp)
assert self.instance is not None, \
"Cannot retrieve locked instance %s" % self.op.instance_name
- _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+ if not self.op.force:
+ _CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
+ else:
+ self.LogWarning("Ignoring offline instance check")
self.primary_offline = \
self.cfg.GetNodeInfo(self.instance.primary_node).offline
node_current = instance.primary_node
timeout = self.op.timeout
- if not self.op.no_remember:
+ # If the instance is offline we shouldn't mark it as down, as that
+ # resets the offline flag.
+ if not self.op.no_remember and instance.admin_state in INSTANCE_ONLINE:
self.cfg.MarkInstanceDown(instance.name)
if self.primary_offline:
HTYPE = constants.HTYPE_INSTANCE
REQ_BGL = False
- _MODIFYABLE = frozenset([
+ _MODIFYABLE = compat.UniqueFrozenset([
constants.IDISK_SIZE,
constants.IDISK_MODE,
])
# TODO: Implement support changing VG while recreating
constants.IDISK_VG,
constants.IDISK_METAVG,
+ constants.IDISK_PROVIDER,
]))
def _RunAllocator(self):
disks=[{constants.IDISK_SIZE: d.size,
constants.IDISK_MODE: d.mode}
for d in self.instance.disks],
- hypervisor=self.instance.hypervisor)
+ hypervisor=self.instance.hypervisor,
+ node_whitelist=None)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
ial.Run(self.op.iallocator)
def ExpandNames(self):
self._ExpandAndLockInstance()
self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+
if self.op.nodes:
self.op.nodes = [_ExpandNodeName(self.cfg, n) for n in self.op.nodes]
self.needed_locks[locking.LEVEL_NODE] = list(self.op.nodes)
if self.op.iallocator:
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
+ self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
self.needed_locks[locking.LEVEL_NODE_RES] = []
def DeclareLocks(self, level):
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP):
self.needed_locks[locking.LEVEL_NODE].extend(
self.cfg.GetNodeGroup(group_uuid).members)
+
+ assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
elif not self.op.nodes:
self._LockInstancesNodes(primary_only=False)
elif level == locking.LEVEL_NODE_RES:
# Release unneeded node and node resource locks
_ReleaseLocks(self, locking.LEVEL_NODE, keep=self.op.nodes)
_ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=self.op.nodes)
+ _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+
+ assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
def Exec(self, feedback_fn):
"""Recreate the disks.
# Change the instance lock. This is definitely safe while we hold the BGL.
# Otherwise the new lock would have to be added in acquired mode.
assert self.REQ_BGL
- assert self.glm.is_owned(locking.BGL)
+ assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
self.glm.remove(locking.LEVEL_INSTANCE, old_name)
self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
lu.needed_locks[locking.LEVEL_NODE_RES] = []
lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
+ # The node allocation lock is actually only needed for replicated instances
+ # (e.g. DRBD8) and if an iallocator is used.
+ lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
+
def _DeclareLocksForMigration(lu, level):
"""Declares locks for L{TLMigrateInstance}.
@param level: Lock level
"""
- if level == locking.LEVEL_NODE:
+ if level == locking.LEVEL_NODE_ALLOC:
+ assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
+
instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
+
+ # Node locks are already declared here rather than at LEVEL_NODE as we need
+ # the instance object anyway to declare the node allocation lock.
if instance.disk_template in constants.DTS_EXT_MIRROR:
if lu.op.target_node is None:
lu.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
else:
lu.needed_locks[locking.LEVEL_NODE] = [instance.primary_node,
lu.op.target_node]
del lu.recalculate_locks[locking.LEVEL_NODE]
else:
lu._LockInstancesNodes() # pylint: disable=W0212
+
+ elif level == locking.LEVEL_NODE:
+ # Node locks are declared together with the node allocation lock
+ assert (lu.needed_locks[locking.LEVEL_NODE] or
+ lu.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET)
+
elif level == locking.LEVEL_NODE_RES:
# Copy node locks
lu.needed_locks[locking.LEVEL_NODE_RES] = \
errors.ECODE_STATE)
if instance.disk_template in constants.DTS_EXT_MIRROR:
+ assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
+
_CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
if self.lu.op.iallocator:
# in the LU
_ReleaseLocks(self.lu, locking.LEVEL_NODE,
keep=[instance.primary_node, self.target_node])
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
else:
+ assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
+
secondary_nodes = instance.secondary_nodes
if not secondary_nodes:
raise errors.ConfigurationError("No secondary node but using"
"""Run the allocator based on input opcode.
"""
+ assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
+
# FIXME: add a self.ignore_ipolicy option
req = iallocator.IAReqRelocate(name=self.instance_name,
relocate_from=[self.instance.primary_node])
# Check for hypervisor version mismatch and warn the user.
nodeinfo = self.rpc.call_node_info([source_node, target_node],
- None, [self.instance.hypervisor])
+ None, [self.instance.hypervisor], False)
for ninfo in nodeinfo.values():
ninfo.Raise("Unable to retrieve node information from node '%s'" %
ninfo.node)
self._GoReconnect(False)
self._WaitUntilSync()
- # If the instance's disk template is `rbd' and there was a successful
- # migration, unmap the device from the source node.
- if self.instance.disk_template == constants.DT_RBD:
+ # If the instance's disk template is `rbd' or `ext' and there was a
+ # successful migration, unmap the device from the source node.
+ if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
disks = _ExpandCheckDisks(instance, instance.disks)
self.feedback_fn("* unmapping instance's disks from %s" % source_node)
for disk in disks:
"""
(disk,) = _AnnotateDiskParams(instance, [device], lu.cfg)
+ excl_stor = _IsExclusiveStorageEnabledNodeName(lu.cfg, node)
return _CreateBlockDevInner(lu, node, instance, disk, force_create, info,
- force_open)
+ force_open, excl_stor)
def _CreateBlockDevInner(lu, node, instance, device, force_create,
- info, force_open):
+ info, force_open, excl_stor):
"""Create a tree of block devices on a given node.
If this device type has to be created on secondaries, create it and
L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device own Open() execution
+ @type excl_stor: boolean
+ @param excl_stor: Whether exclusive_storage is active for the node
"""
if device.CreateOnSecondary():
if device.children:
for child in device.children:
_CreateBlockDevInner(lu, node, instance, child, force_create,
- info, force_open)
+ info, force_open, excl_stor)
if not force_create:
return
- _CreateSingleBlockDev(lu, node, instance, device, info, force_open)
+ _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+ excl_stor)
-def _CreateSingleBlockDev(lu, node, instance, device, info, force_open):
+def _CreateSingleBlockDev(lu, node, instance, device, info, force_open,
+ excl_stor):
"""Create a single block device on a given node.
This will not recurse over children of the device, so they must be
L{backend.BlockdevCreate} function where it specifies
whether we run on primary or not, and it affects both
the child assembly and the device own Open() execution
+ @type excl_stor: boolean
+ @param excl_stor: Whether exclusive_storage is active for the node
"""
lu.cfg.SetDiskID(device, node)
result = lu.rpc.call_blockdev_create(node, device, device.size,
- instance.name, force_open, info)
+ instance.name, force_open, info,
+ excl_stor)
result.Raise("Can't create block device %s on"
" node %s for instance %s" % (device, node, instance.name))
if device.physical_id is None:
_DISK_TEMPLATE_NAME_PREFIX = {
constants.DT_PLAIN: "",
constants.DT_RBD: ".rbd",
+ constants.DT_EXT: ".ext",
}
constants.DT_SHARED_FILE: constants.LD_FILE,
constants.DT_BLOCK: constants.LD_BLOCKDEV,
constants.DT_RBD: constants.LD_RBD,
+ constants.DT_EXT: constants.LD_EXT,
}
"""Generate the entire disk layout for a given template type.
"""
- #TODO: compute space requirements
-
vgname = lu.cfg.GetVGName()
disk_count = len(disk_info)
disks = []
disk[constants.IDISK_ADOPT])
elif template_name == constants.DT_RBD:
logical_id_fn = lambda idx, _, disk: ("rbd", names[idx])
+ elif template_name == constants.DT_EXT:
+ def logical_id_fn(idx, _, disk):
+ provider = disk.get(constants.IDISK_PROVIDER, None)
+ if provider is None:
+        raise errors.ProgrammerError("Disk template is %s, but '%s' is"
+                                     " not found" % (constants.DT_EXT,
+                                                     constants.IDISK_PROVIDER))
+ return (provider, names[idx])
else:
raise errors.ProgrammerError("Unknown disk template '%s'" % template_name)
dev_type = _DISK_TEMPLATE_DEVICE_TYPE[template_name]
for idx, disk in enumerate(disk_info):
+ params = {}
+    # Only for the Ext template, add disk_info to params
+ if template_name == constants.DT_EXT:
+ params[constants.IDISK_PROVIDER] = disk[constants.IDISK_PROVIDER]
+ for key in disk:
+ if key not in constants.IDISK_PARAMS:
+ params[key] = disk[key]
disk_index = idx + base_index
size = disk[constants.IDISK_SIZE]
feedback_fn("* disk %s, size %s" %
logical_id=logical_id_fn(idx, disk_index, disk),
iv_name="disk/%d" % disk_index,
mode=disk[constants.IDISK_MODE],
- params={}))
+ params=params))
return disks
osname, node)
-def _CreateInstanceAllocRequest(op, disks, nics, beparams):
+def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
"""Wrapper around IAReqInstanceAlloc.
@param op: The instance opcode
@param disks: The computed disks
@param nics: The computed nics
@param beparams: The full filled beparams
+ @param node_whitelist: List of nodes which should appear as online to the
+ allocator (unless the node is already marked offline)
@returns: A filled L{iallocator.IAReqInstanceAlloc}
spindle_use=spindle_use,
disks=disks,
nics=[n.ToDict() for n in nics],
- hypervisor=op.hypervisor)
+ hypervisor=op.hypervisor,
+ node_whitelist=node_whitelist)
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
@param op: The instance opcode
@param default_vg: The default_vg to assume
- @return: The computer disks
+ @return: The computed disks
"""
disks = []
raise errors.OpPrereqError("Invalid disk size '%s'" % size,
errors.ECODE_INVAL)
+ ext_provider = disk.get(constants.IDISK_PROVIDER, None)
+ if ext_provider and op.disk_template != constants.DT_EXT:
+ raise errors.OpPrereqError("The '%s' option is only valid for the %s"
+ " disk template, not %s" %
+ (constants.IDISK_PROVIDER, constants.DT_EXT,
+ op.disk_template), errors.ECODE_INVAL)
+
data_vg = disk.get(constants.IDISK_VG, default_vg)
new_disk = {
constants.IDISK_SIZE: size,
constants.IDISK_MODE: mode,
constants.IDISK_VG: data_vg,
}
+
if constants.IDISK_METAVG in disk:
new_disk[constants.IDISK_METAVG] = disk[constants.IDISK_METAVG]
if constants.IDISK_ADOPT in disk:
new_disk[constants.IDISK_ADOPT] = disk[constants.IDISK_ADOPT]
+
+ # For extstorage, demand the `provider' option and add any
+ # additional parameters (ext-params) to the dict
+ if op.disk_template == constants.DT_EXT:
+ if ext_provider:
+ new_disk[constants.IDISK_PROVIDER] = ext_provider
+ for key in disk:
+ if key not in constants.IDISK_PARAMS:
+ new_disk[key] = disk[key]
+ else:
+ raise errors.OpPrereqError("Missing provider for template '%s'" %
+ constants.DT_EXT, errors.ECODE_INVAL)
+
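+    # Illustrative example (not from the original code): with the "ext"
+    # template, a disk spec such as
+    #   {"size": 1024, "mode": "rw", "provider": "pvdr1", "param1": "val1"}
+    # keeps the "provider" entry and carries the extra "param1" key through
+    # as an ext-param, while for any other template a "provider" key would
+    # have been rejected above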
disks.append(new_disk)
return disks
return cluster.SimpleFillBE(op.beparams)
+def _CheckOpportunisticLocking(op):
+ """Generate error if opportunistic locking is not possible.
+
+ """
+ if op.opportunistic_locking and not op.iallocator:
+ raise errors.OpPrereqError("Opportunistic locking is only available in"
+ " combination with an instance allocator",
+ errors.ECODE_INVAL)
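+  # Rough usage sketch (illustrative, based on the LUs below): an opcode
+  # supporting opportunistic locking sets e.g.
+  #   self.opportunistic_locks[locking.LEVEL_NODE] = True
+  # in ExpandNames; the allocator run then passes the node locks actually
+  # obtained as node_whitelist to the iallocator request, and a failure to
+  # find nodes is reported as a temporary error (ECODE_TEMP_NORES)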
+
+
class LUInstanceCreate(LogicalUnit):
"""Create an instance.
# check disks. parameter names and consistent adopt/no-adopt strategy
has_adopt = has_no_adopt = False
for disk in self.op.disks:
- utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
+ if self.op.disk_template != constants.DT_EXT:
+ utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
if constants.IDISK_ADOPT in disk:
has_adopt = True
else:
" template")
self.op.snode = None
+ _CheckOpportunisticLocking(self.op)
+
self._cds = _GetClusterDomainSecret()
if self.op.mode == constants.INSTANCE_IMPORT:
# specifying a group on instance creation and then selecting nodes from
# that group
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
- self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
+ self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
+ if self.op.opportunistic_locking:
+ self.opportunistic_locks[locking.LEVEL_NODE] = True
+ self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
else:
self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
nodelist = [self.op.pnode]
self.op.snode = _ExpandNodeName(self.cfg, self.op.snode)
nodelist.append(self.op.snode)
self.needed_locks[locking.LEVEL_NODE] = nodelist
- # Lock resources of instance's primary and secondary nodes (copy to
- # prevent accidential modification)
- self.needed_locks[locking.LEVEL_NODE_RES] = list(nodelist)
# in case of import lock the source node too
if self.op.mode == constants.INSTANCE_IMPORT:
if src_node is None:
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
self.op.src_node = None
if os.path.isabs(src_path):
raise errors.OpPrereqError("Importing an instance from a path"
self.op.src_path = src_path = \
utils.PathJoin(pathutils.EXPORT_DIR, src_path)
+ self.needed_locks[locking.LEVEL_NODE_RES] = \
+ _CopyLockList(self.needed_locks[locking.LEVEL_NODE])
+
def _RunAllocator(self):
"""Run the allocator based on input opcode.
"""
+ if self.op.opportunistic_locking:
+ # Only consider nodes for which a lock is held
+ node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+ else:
+ node_whitelist = None
+
#TODO Export network to iallocator so that it chooses a pnode
# in a nodegroup that has the desired network connected to
req = _CreateInstanceAllocRequest(self.op, self.disks,
- self.nics, self.be_full)
+ self.nics, self.be_full,
+ node_whitelist)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
ial.Run(self.op.iallocator)
if not ial.success:
+      # With opportunistic locks, only a temporary failure is generated
+ if self.op.opportunistic_locking:
+ ecode = errors.ECODE_TEMP_NORES
+ else:
+ ecode = errors.ECODE_NORES
+
raise errors.OpPrereqError("Can't compute nodes using"
" iallocator '%s': %s" %
(self.op.iallocator, ial.info),
- errors.ECODE_NORES)
+ ecode)
+
self.op.pnode = ial.result[0]
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
self.op.hvparams)
- hv_type = hypervisor.GetHypervisor(self.op.hypervisor)
+ hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
hv_type.CheckParameterSyntax(filled_hvp)
self.hv_full = filled_hvp
# check that we don't specify global parameters on an instance
self._RevertToDefaults(cluster)
# NIC buildup
- self.nics = _ComputeNics(self.op, cluster, self.hostname1.ip, self.cfg,
+ self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
self.proc.GetECId())
# disk checks/pre-build
self._RunAllocator()
# Release all unneeded node locks
- _ReleaseLocks(self, locking.LEVEL_NODE,
- keep=filter(None, [self.op.pnode, self.op.snode,
- self.op.src_node]))
- _ReleaseLocks(self, locking.LEVEL_NODE_RES,
- keep=filter(None, [self.op.pnode, self.op.snode,
- self.op.src_node]))
+ keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
+ _ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
+ _ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
+ _ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
+
+ assert (self.owned_locks(locking.LEVEL_NODE) ==
+ self.owned_locks(locking.LEVEL_NODE_RES)), \
+ "Node locks differ from node resource locks"
#### node related checks
" or does not belong to network %s" %
(nic.ip, net),
errors.ECODE_NOTUNIQUE)
- else:
- # net is None, ip None or given
- if self.op.conflicts_check:
- _CheckForConflictingIp(self, nic.ip, self.pnode.name)
+
+ # net is None, ip None or given
+ elif self.op.conflicts_check:
+ _CheckForConflictingIp(self, nic.ip, self.pnode.name)
# mirror node verification
if self.op.disk_template in constants.DTS_INT_MIRROR:
" from the first disk's node group will be"
" used")
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+ nodes = [pnode]
+ if self.op.disk_template in constants.DTS_INT_MIRROR:
+ nodes.append(snode)
+ has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+ if compat.any(map(has_es, nodes)):
+ raise errors.OpPrereqError("Disk template %s not supported with"
+ " exclusive storage" % self.op.disk_template,
+ errors.ECODE_STATE)
+
nodenames = [pnode.name] + self.secondaries
# Verify instance specs
# Any function that checks prerequisites can be placed here.
# Check if there is enough space on the RADOS cluster.
_CheckRADOSFreeSpace()
+ elif self.op.disk_template == constants.DT_EXT:
+ # FIXME: Function that checks prereqs if needed
+ pass
else:
# Check lv size requirements, if not adopting
req_sizes = _ComputeDiskSizePerVG(self.op.disk_template, self.disks)
_CheckNicsBridgesExist(self, self.nics, self.pnode.name)
+ #TODO: _CheckExtParams (remotely)
+ # Check parameters for extstorage
+
# memory check on primary node
#TODO(dynmem): use MINMEM for checking
if self.op.start:
assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
self.owned_locks(locking.LEVEL_NODE)), \
"Node locks differ from node resource locks"
+ assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
ht_kind = self.op.hypervisor
if ht_kind in constants.HTS_REQ_PORT:
" or set a cluster-wide default iallocator",
errors.ECODE_INVAL)
+ _CheckOpportunisticLocking(self.op)
+
dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
if dups:
raise errors.OpPrereqError("There are duplicate instance names: %s" %
"""
self.share_locks = _ShareAll()
- self.needed_locks = {}
+ self.needed_locks = {
+      # The iallocator will select nodes; even if no iallocator is used,
+      # collisions with LUInstanceCreate should be avoided
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+ }
if self.op.iallocator:
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
+
+ if self.op.opportunistic_locking:
+ self.opportunistic_locks[locking.LEVEL_NODE] = True
+ self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
else:
nodeslist = []
for inst in self.op.instances:
default_vg = self.cfg.GetVGName()
ec_id = self.proc.GetECId()
+ if self.op.opportunistic_locking:
+ # Only consider nodes for which a lock is held
+ node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+ else:
+ node_whitelist = None
+
insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
_ComputeNics(op, cluster, None,
self.cfg, ec_id),
- _ComputeFullBeParams(op, cluster))
+ _ComputeFullBeParams(op, cluster),
+ node_whitelist)
for op in self.op.instances]
req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
self.ia_result = ial.result
if self.op.dry_run:
- self.dry_run_rsult = objects.FillDict(self._ConstructPartialResult(), {
+ self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
constants.JOB_IDS_KEY: [],
})
@rtype: dict
"""
- hyper = hypervisor.GetHypervisor(instance.hypervisor)
+ hyper = hypervisor.GetHypervisorClass(instance.hypervisor)
# beparams and hvparams are passed separately, to avoid editing the
# instance and then saving the defaults in the instance itself.
hvparams = cluster.FillHV(instance)
if self.op.iallocator is not None:
# iallocator will select a new node in the same group
self.needed_locks[locking.LEVEL_NODEGROUP] = []
+ self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODE_RES] = []
if self.op.iallocator is not None:
assert self.op.remote_node is None
assert not self.needed_locks[locking.LEVEL_NODE]
+ assert locking.NAL in self.owned_locks(locking.LEVEL_NODE_ALLOC)
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = \
for group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
for node_name in self.cfg.GetNodeGroup(group_uuid).members]
else:
+ assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
+
self._LockInstancesNodes()
+
elif level == locking.LEVEL_NODE_RES:
# Reuse node locks
self.needed_locks[locking.LEVEL_NODE_RES] = \
# Release unneeded node and node resource locks
_ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
_ReleaseLocks(self.lu, locking.LEVEL_NODE_RES, keep=touched_nodes)
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
# Release any owned node group
- if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
- _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
+ _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
# Check whether disks are valid
for disk_idx in self.disks:
(owned_nodes, self.node_secondary_ip.keys()))
assert (self.lu.owned_locks(locking.LEVEL_NODE) ==
self.lu.owned_locks(locking.LEVEL_NODE_RES))
+ assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
owned_instances = self.lu.owned_locks(locking.LEVEL_INSTANCE)
assert list(owned_instances) == [self.instance_name], \
feedback_fn("Replacing disk(s) %s for instance '%s'" %
(utils.CommaJoin(self.disks), self.instance.name))
- feedback_fn("Current primary node: %s", self.instance.primary_node)
- feedback_fn("Current seconary node: %s",
+ feedback_fn("Current primary node: %s" % self.instance.primary_node)
+    feedback_fn("Current secondary node: %s" %
utils.CommaJoin(self.instance.secondary_nodes))
activate_disks = (self.instance.admin_state != constants.ADMINST_UP)
new_lvs = [lv_data, lv_meta]
old_lvs = [child.Copy() for child in dev.children]
iv_names[dev.iv_name] = (dev, old_lvs, new_lvs)
+ excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, node_name)
# we pass force_create=True to force the LVM creation
for new_lv in new_lvs:
_CreateBlockDevInner(self.lu, node_name, self.instance, new_lv, True,
- _GetInstanceInfoText(self.instance), False)
+ _GetInstanceInfoText(self.instance), False,
+ excl_stor)
return iv_names
# Step: create new storage
self.lu.LogStep(3, steps_total, "Allocate new storage")
disks = _AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
+ excl_stor = _IsExclusiveStorageEnabledNodeName(self.lu.cfg, self.new_node)
for idx, dev in enumerate(disks):
self.lu.LogInfo("Adding new local storage on %s for disk/%d" %
(self.new_node, idx))
# we pass force_create=True to force LVM creation
for new_lv in dev.children:
_CreateBlockDevInner(self.lu, self.new_node, self.instance, new_lv,
- True, _GetInstanceInfoText(self.instance), False)
+ True, _GetInstanceInfoText(self.instance), False,
+ excl_stor)
# Step 4: dbrd minors and drbd setups changes
# after this, we must manually remove the drbd minors on both the
try:
_CreateSingleBlockDev(self.lu, self.new_node, self.instance,
anno_new_drbd,
- _GetInstanceInfoText(self.instance), False)
+ _GetInstanceInfoText(self.instance), False,
+ excl_stor)
except errors.GenericError:
self.cfg.ReleaseDRBDMinors(self.instance.name)
raise
utils.FormatUnit(self.delta, "h"),
errors.ECODE_INVAL)
- if instance.disk_template not in (constants.DT_FILE,
- constants.DT_SHARED_FILE,
- constants.DT_RBD):
+ self._CheckDiskSpace(nodenames, self.disk.ComputeGrowth(self.delta))
+
+ def _CheckDiskSpace(self, nodenames, req_vgspace):
+ template = self.instance.disk_template
+    if template not in constants.DTS_NO_FREE_SPACE_CHECK:
# TODO: check the free disk space for file, when that feature will be
# supported
- _CheckNodesFreeDiskPerVG(self, nodenames,
- self.disk.ComputeGrowth(self.delta))
+ nodes = map(self.cfg.GetNodeInfo, nodenames)
+ es_nodes = filter(lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n),
+ nodes)
+ if es_nodes:
+        # With exclusive storage we need to do something smarter than just
+        # looking at free space; for now, let's simply abort the operation.
+ raise errors.OpPrereqError("Cannot grow disks when exclusive_storage"
+ " is enabled", errors.ECODE_STATE)
+ _CheckNodesFreeDiskPerVG(self, nodenames, req_vgspace)
def Exec(self, feedback_fn):
"""Execute disk grow.
for (op, _, params) in mods:
assert ht.TDict(params)
- utils.ForceDictType(params, key_types)
+ # If 'key_types' is an empty dict, we assume we have an
+ # 'ext' template and thus do not ForceDictType
+ if key_types:
+ utils.ForceDictType(params, key_types)
if op == constants.DDM_REMOVE:
if params:
params[constants.IDISK_SIZE] = size
- elif op == constants.DDM_MODIFY and constants.IDISK_SIZE in params:
- raise errors.OpPrereqError("Disk size change not possible, use"
- " grow-disk", errors.ECODE_INVAL)
+ elif op == constants.DDM_MODIFY:
+ if constants.IDISK_SIZE in params:
+ raise errors.OpPrereqError("Disk size change not possible, use"
+ " grow-disk", errors.ECODE_INVAL)
+ if constants.IDISK_MODE not in params:
+ raise errors.OpPrereqError("Disk 'mode' is the only kind of"
+ " modification supported, but missing",
+ errors.ECODE_NOENT)
+ if len(params) > 1:
+ raise errors.OpPrereqError("Disk modification doesn't support"
+ " additional arbitrary parameters",
+ errors.ECODE_INVAL)
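+      # Illustrative consequence of the checks above: {"mode": "ro"} is a
+      # valid modification, {"size": ...} is not (grow-disk must be used),
+      # and any dict containing more than the "mode" key is rejected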
@staticmethod
def _VerifyNicModification(op, params):
self.op.nics = self._UpgradeDiskNicMods(
"NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
- # Check disk modifications
- self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
- self._VerifyDiskModification)
-
if self.op.disks and self.op.disk_template is not None:
raise errors.OpPrereqError("Disk template conversion and other disk"
" changes not supported at the same time",
raise errors.OpPrereqError("Cannot set the NIC IP address to None"
" on a routed NIC", errors.ECODE_INVAL)
+ elif new_mode == constants.NIC_MODE_OVS:
+ # TODO: check OVS link
+ self.LogInfo("OVS links are currently not checked for correctness")
+
if constants.INIC_MAC in params:
mac = params[constants.INIC_MAC]
if mac is None:
elif new_ip.lower() == constants.NIC_IP_POOL:
raise errors.OpPrereqError("ip=pool, but no network found",
errors.ECODE_INVAL)
- else:
- # new net is None
- if self.op.conflicts_check:
- _CheckForConflictingIp(self, new_ip, pnode)
+
+ # new net is None
+ elif self.op.conflicts_check:
+ _CheckForConflictingIp(self, new_ip, pnode)
if old_ip:
if old_net:
private.params = new_params
private.filled = new_filled_params
+ def _PreCheckDiskTemplate(self, pnode_info):
+ """CheckPrereq checks related to a new disk template."""
+ # Arguments are passed to avoid configuration lookups
+ instance = self.instance
+ pnode = instance.primary_node
+ cluster = self.cluster
+ if instance.disk_template == self.op.disk_template:
+ raise errors.OpPrereqError("Instance already has disk template %s" %
+ instance.disk_template, errors.ECODE_INVAL)
+
+ if (instance.disk_template,
+ self.op.disk_template) not in self._DISK_CONVERSIONS:
+ raise errors.OpPrereqError("Unsupported disk template conversion from"
+ " %s to %s" % (instance.disk_template,
+ self.op.disk_template),
+ errors.ECODE_INVAL)
+ _CheckInstanceState(self, instance, INSTANCE_DOWN,
+ msg="cannot change disk template")
+ if self.op.disk_template in constants.DTS_INT_MIRROR:
+ if self.op.remote_node == pnode:
+ raise errors.OpPrereqError("Given new secondary node %s is the same"
+ " as the primary node of the instance" %
+ self.op.remote_node, errors.ECODE_STATE)
+ _CheckNodeOnline(self, self.op.remote_node)
+ _CheckNodeNotDrained(self, self.op.remote_node)
+ # FIXME: here we assume that the old instance type is DT_PLAIN
+ assert instance.disk_template == constants.DT_PLAIN
+ disks = [{constants.IDISK_SIZE: d.size,
+ constants.IDISK_VG: d.logical_id[0]}
+ for d in instance.disks]
+ required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
+ _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
+
+ snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
+ snode_group = self.cfg.GetNodeGroup(snode_info.group)
+ ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
+ snode_group)
+ _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
+ ignore=self.op.ignore_ipolicy)
+ if pnode_info.group != snode_info.group:
+ self.LogWarning("The primary and secondary nodes are in two"
+ " different node groups; the disk parameters"
+ " from the first disk's node group will be"
+ " used")
+
+    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
+ # Make sure none of the nodes require exclusive storage
+ nodes = [pnode_info]
+ if self.op.disk_template in constants.DTS_INT_MIRROR:
+ assert snode_info
+ nodes.append(snode_info)
+ has_es = lambda n: _IsExclusiveStorageEnabledNode(self.cfg, n)
+ if compat.any(map(has_es, nodes)):
+ errmsg = ("Cannot convert disk template from %s to %s when exclusive"
+ " storage is enabled" % (instance.disk_template,
+ self.op.disk_template))
+ raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
+
def CheckPrereq(self):
"""Check prerequisites.
# dictionary with instance information after the modification
ispec = {}
+ # Check disk modifications. This is done here and not in CheckArguments
+ # (as with NICs), because we need to know the instance's disk template
+ if instance.disk_template == constants.DT_EXT:
+ self._CheckMods("disk", self.op.disks, {},
+ self._VerifyDiskModification)
+ else:
+ self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
+ self._VerifyDiskModification)
+
# Prepare disk/NIC modifications
self.diskmod = PrepareContainerMods(self.op.disks, None)
self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+ # Check the validity of the `provider' parameter
+    if instance.disk_template == constants.DT_EXT:
+ for mod in self.diskmod:
+ ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+ if mod[0] == constants.DDM_ADD:
+ if ext_provider is None:
+ raise errors.OpPrereqError("Instance template is '%s' and parameter"
+ " '%s' missing, during disk add" %
+ (constants.DT_EXT,
+ constants.IDISK_PROVIDER),
+ errors.ECODE_NOENT)
+ elif mod[0] == constants.DDM_MODIFY:
+ if ext_provider:
+ raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
+ " modification" %
+ constants.IDISK_PROVIDER,
+ errors.ECODE_INVAL)
+ else:
+ for mod in self.diskmod:
+ ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
+ if ext_provider is not None:
+ raise errors.OpPrereqError("Parameter '%s' is only valid for"
+ " instances of type '%s'" %
+ (constants.IDISK_PROVIDER,
+ constants.DT_EXT),
+ errors.ECODE_INVAL)
+
# OS change
if self.op.os_name and not self.op.force:
_CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
"Can't modify disk template and apply disk changes at the same time"
if self.op.disk_template:
- if instance.disk_template == self.op.disk_template:
- raise errors.OpPrereqError("Instance already has disk template %s" %
- instance.disk_template, errors.ECODE_INVAL)
-
- if (instance.disk_template,
- self.op.disk_template) not in self._DISK_CONVERSIONS:
- raise errors.OpPrereqError("Unsupported disk template conversion from"
- " %s to %s" % (instance.disk_template,
- self.op.disk_template),
- errors.ECODE_INVAL)
- _CheckInstanceState(self, instance, INSTANCE_DOWN,
- msg="cannot change disk template")
- if self.op.disk_template in constants.DTS_INT_MIRROR:
- if self.op.remote_node == pnode:
- raise errors.OpPrereqError("Given new secondary node %s is the same"
- " as the primary node of the instance" %
- self.op.remote_node, errors.ECODE_STATE)
- _CheckNodeOnline(self, self.op.remote_node)
- _CheckNodeNotDrained(self, self.op.remote_node)
- # FIXME: here we assume that the old instance type is DT_PLAIN
- assert instance.disk_template == constants.DT_PLAIN
- disks = [{constants.IDISK_SIZE: d.size,
- constants.IDISK_VG: d.logical_id[0]}
- for d in instance.disks]
- required = _ComputeDiskSizePerVG(self.op.disk_template, disks)
- _CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
-
- snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
- snode_group = self.cfg.GetNodeGroup(snode_info.group)
- ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
- snode_group)
- _CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info,
- ignore=self.op.ignore_ipolicy)
- if pnode_info.group != snode_info.group:
- self.LogWarning("The primary and secondary nodes are in two"
- " different node groups; the disk parameters"
- " from the first disk's node group will be"
- " used")
+ self._PreCheckDiskTemplate(pnode_info)
# hvparams processing
if self.op.hvparams:
hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
# local check
- hypervisor.GetHypervisor(hv_type).CheckParameterSyntax(hv_new)
+ hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
_CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
self.hv_proposed = self.hv_new = hv_new # the new actual values
self.hv_inst = i_hvdict # the new dict (without defaults)
instance_info = self.rpc.call_instance_info(pnode, instance.name,
instance.hypervisor)
nodeinfo = self.rpc.call_node_info(mem_check_list, None,
- [instance.hypervisor])
+ [instance.hypervisor], False)
pninfo = nodeinfo[pnode]
msg = pninfo.fail_msg
if msg:
errors.ECODE_STATE)
disk_sizes = [disk.size for disk in instance.disks]
disk_sizes.extend(params["size"] for (op, idx, params, private) in
- self.diskmod)
+ self.diskmod if op == constants.DDM_ADD)
ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
- if self.op.offline is not None:
- if self.op.offline:
- msg = "can't change to offline"
- else:
- msg = "can't change to online"
- _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE, msg=msg)
+ if self.op.offline is not None and self.op.offline:
+ _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+ msg="can't change to offline")
# Pre-compute NIC changes (necessary to use result in hooks)
self._nic_chgdesc = []
self.diskparams)
anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
self.diskparams)
+ p_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
+ s_excl_stor = _IsExclusiveStorageEnabledNodeName(self.cfg, snode)
info = _GetInstanceInfoText(instance)
feedback_fn("Creating additional volumes...")
# first, create the missing data and meta devices
for disk in anno_disks:
# unfortunately this is... not too nice
_CreateSingleBlockDev(self, pnode, instance, disk.children[1],
- info, True)
+ info, True, p_excl_stor)
for child in disk.children:
- _CreateSingleBlockDev(self, snode, instance, child, info, True)
+ _CreateSingleBlockDev(self, snode, instance, child, info, True,
+ s_excl_stor)
# at this stage, all new LVs have been created, we can rename the
# old ones
feedback_fn("Renaming original volumes...")
feedback_fn("Initializing DRBD devices...")
# all child devices are in place, we can now create the DRBD devices
for disk in anno_disks:
- for node in [pnode, snode]:
+ for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
f_create = node == pnode
- _CreateSingleBlockDev(self, node, instance, disk, info, f_create)
+ _CreateSingleBlockDev(self, node, instance, disk, info, f_create,
+ excl_stor)
# at this point, the instance has been modified
instance.disk_template = constants.DT_DRBD8
def ExpandNames(self):
self.share_locks = _ShareAll()
+
self.needed_locks = {
locking.LEVEL_NODEGROUP: [],
locking.LEVEL_NODE: [],
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
}
self._ExpandAndLockInstance()
locking.LEVEL_NODE: self.wanted,
}
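+ # with no names given all nodes end up locked, so also acquire the node
+ # allocation lock to keep the set of nodes stable while the query runs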
+ if not self.names:
+ lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
def DeclareLocks(self, lu, level):
pass
# - removing the removal operation altogether
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ # Allocations should be stopped while this LU runs with node locks, but
+ # it doesn't have to be exclusive
+ self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
+ self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
def DeclareLocks(self, level):
"""Last minute lock declaration."""
# All nodes are locked anyway, so nothing to do here.
REQ_BGL = False
def ExpandNames(self):
- self.needed_locks = {}
- # We need all nodes to be locked in order for RemoveExport to work, but we
- # don't need to lock the instance itself, as nothing will happen to it (and
- # we can remove exports also for a removed instance)
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ self.needed_locks = {
+ # We need all nodes to be locked in order for RemoveExport to work, but
+ # we don't need to lock the instance itself, as nothing will happen to it
+ # (and exports can also be removed for an already-removed instance)
+ locking.LEVEL_NODE: locking.ALL_SET,
+
+ # Removing backups is quick, so blocking allocations is justified
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+ }
+
+ # Allocations should be stopped while this LU runs with node locks, but it
+ # doesn't have to be exclusive
+ self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
def Exec(self, feedback_fn):
"""Remove any export.
self.op.mode, errors.ECODE_INVAL)
if self.op.direction == constants.IALLOCATOR_DIR_OUT:
- if self.op.allocator is None:
+ if self.op.iallocator is None:
raise errors.OpPrereqError("Missing allocator name",
errors.ECODE_INVAL)
elif self.op.direction != constants.IALLOCATOR_DIR_IN:
if self.op.direction == constants.IALLOCATOR_DIR_IN:
result = ial.in_text
else:
- ial.Run(self.op.allocator, validate=False)
+ ial.Run(self.op.iallocator, validate=False)
result = ial.out_text
return result
-# Network LUs
class LUNetworkAdd(LogicalUnit):
"""Logical unit for creating networks.
mn = self.cfg.GetMasterNode()
return ([mn], [mn])
+ def CheckArguments(self):
+ if self.op.mac_prefix:
+ self.op.mac_prefix = \
+ utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
+
def ExpandNames(self):
self.network_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
- self.needed_locks = {}
- self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
- def CheckPrereq(self):
- """Check prerequisites.
+ if self.op.conflicts_check:
+ self.share_locks[locking.LEVEL_NODE] = 1
+ self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
+ self.needed_locks = {
+ locking.LEVEL_NODE: locking.ALL_SET,
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+ }
+ else:
+ self.needed_locks = {}
- This checks that the given group name is not an existing node group
- already.
+ self.add_locks[locking.LEVEL_NETWORK] = self.network_uuid
- """
+ def CheckPrereq(self):
if self.op.network is None:
raise errors.OpPrereqError("Network must be given",
errors.ECODE_INVAL)
uuid = self.cfg.LookupNetwork(self.op.network_name)
if uuid:
- raise errors.OpPrereqError("Network '%s' already defined" %
- self.op.network, errors.ECODE_EXISTS)
-
- if self.op.mac_prefix:
- utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
+ raise errors.OpPrereqError(("Network with name '%s' already exists" %
+ self.op.network_name), errors.ECODE_EXISTS)
# Check tag validity
for tag in self.op.tags:
mac_prefix=self.op.mac_prefix,
network_type=self.op.network_type,
uuid=self.network_uuid,
- family=4)
+ family=constants.IP4_VERSION)
# Initialize the associated address pool
try:
pool = network.AddressPool.InitializeNetwork(nobj)
except errors.AddressPoolError, e:
- raise errors.OpExecError("Cannot create IP pool for this network. %s" % e)
+ raise errors.OpExecError("Cannot create IP pool for this network: %s" % e)
# Check if we need to reserve the nodes and the cluster master IP
# These may not be allocated to any instances in routed mode, as
# they wouldn't function anyway.
- for node in self.cfg.GetAllNodesInfo().values():
- for ip in [node.primary_ip, node.secondary_ip]:
- try:
- pool.Reserve(ip)
- self.LogInfo("Reserved node %s's IP (%s)", node.name, ip)
-
- except errors.AddressPoolError:
- pass
+ if self.op.conflicts_check:
+ for node in self.cfg.GetAllNodesInfo().values():
+ for ip in [node.primary_ip, node.secondary_ip]:
+ try:
+ if pool.Contains(ip):
+ pool.Reserve(ip)
+ self.LogInfo("Reserved IP address of node '%s' (%s)",
+ node.name, ip)
+ except errors.AddressPoolError:
+ self.LogWarning("Cannot reserve IP address of node '%s' (%s)",
+ node.name, ip)
- master_ip = self.cfg.GetClusterInfo().master_ip
- try:
- pool.Reserve(master_ip)
- self.LogInfo("Reserved cluster master IP (%s)", master_ip)
- except errors.AddressPoolError:
- pass
+ master_ip = self.cfg.GetClusterInfo().master_ip
+ try:
+ if pool.Contains(master_ip):
+ pool.Reserve(master_ip)
+ self.LogInfo("Reserved cluster master IP address (%s)", master_ip)
+ except errors.AddressPoolError:
+ self.LogWarning("Cannot reserve cluster master IP address (%s)",
+ master_ip)
if self.op.add_reserved_ips:
for ip in self.op.add_reserved_ips:
self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
if not self.network_uuid:
- raise errors.OpPrereqError("Network %s not found" % self.op.network_name,
- errors.ECODE_INVAL)
+ raise errors.OpPrereqError(("Network '%s' not found" %
+ self.op.network_name), errors.ECODE_NOENT)
+
+ self.share_locks[locking.LEVEL_NODEGROUP] = 1
self.needed_locks = {
locking.LEVEL_NETWORK: [self.network_uuid],
+ locking.LEVEL_NODEGROUP: locking.ALL_SET,
}
def CheckPrereq(self):
cluster.
"""
-
# Verify that the network is not connected.
node_groups = [group.name
for group in self.cfg.GetAllNodeGroupsInfo().values()
- for net in group.networks.keys()
- if net == self.network_uuid]
+ if self.network_uuid in group.networks]
if node_groups:
- self.LogWarning("Nework '%s' is connected to the following"
- " node groups: %s" % (self.op.network_name,
- utils.CommaJoin(utils.NiceSort(node_groups))))
- raise errors.OpPrereqError("Network still connected",
- errors.ECODE_STATE)
+ self.LogWarning("Network '%s' is connected to the following"
+ " node groups: %s" %
+ (self.op.network_name,
+ utils.CommaJoin(utils.NiceSort(node_groups))))
+ raise errors.OpPrereqError("Network still connected", errors.ECODE_STATE)
def BuildHooksEnv(self):
"""Build hooks env.
def ExpandNames(self):
self.network_uuid = self.cfg.LookupNetwork(self.op.network_name)
- self.network = self.cfg.GetNetwork(self.network_uuid)
- if self.network is None:
- raise errors.OpPrereqError("Could not retrieve network '%s' (UUID: %s)" %
- (self.op.network_name, self.network_uuid),
- errors.ECODE_INVAL)
+ if self.network_uuid is None:
+ raise errors.OpPrereqError(("Network '%s' not found" %
+ self.op.network_name), errors.ECODE_NOENT)
+
self.needed_locks = {
locking.LEVEL_NETWORK: [self.network_uuid],
}
"""Check prerequisites.
"""
+ self.network = self.cfg.GetNetwork(self.network_uuid)
self.gateway = self.network.gateway
self.network_type = self.network.network_type
self.mac_prefix = self.network.mac_prefix
else:
self.gateway = self.op.gateway
if self.pool.IsReserved(self.gateway):
- raise errors.OpPrereqError("%s is already reserved" %
- self.gateway, errors.ECODE_INVAL)
+ raise errors.OpPrereqError("Gateway IP address '%s' is already"
+ " reserved" % self.gateway,
+ errors.ECODE_STATE)
if self.op.network_type:
if self.op.network_type == constants.VALUE_NONE:
if self.op.mac_prefix == constants.VALUE_NONE:
self.mac_prefix = None
else:
- utils.NormalizeAndValidateMac(self.op.mac_prefix + ":00:00:00")
- self.mac_prefix = self.op.mac_prefix
+ self.mac_prefix = \
+ utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)
if self.op.gateway6:
if self.op.gateway6 == constants.VALUE_NONE:
def ExpandNames(self, lu):
lu.needed_locks = {}
+ lu.share_locks = _ShareAll()
- self._all_networks = lu.cfg.GetAllNetworksInfo()
- name_to_uuid = dict((n.name, n.uuid) for n in self._all_networks.values())
+ self.do_locking = self.use_locking
- if not self.names:
- self.wanted = [name_to_uuid[name]
- for name in utils.NiceSort(name_to_uuid.keys())]
- else:
- # Accept names to be either names or UUIDs.
+ all_networks = lu.cfg.GetAllNetworksInfo()
+ name_to_uuid = dict((n.name, n.uuid) for n in all_networks.values())
+
+ if self.names:
missing = []
self.wanted = []
- all_uuid = frozenset(self._all_networks.keys())
for name in self.names:
- if name in all_uuid:
- self.wanted.append(name)
- elif name in name_to_uuid:
+ if name in name_to_uuid:
self.wanted.append(name_to_uuid[name])
else:
missing.append(name)
if missing:
raise errors.OpPrereqError("Some networks do not exist: %s" % missing,
errors.ECODE_NOENT)
+ else:
+ self.wanted = locking.ALL_SET
+
+ if self.do_locking:
+ lu.needed_locks[locking.LEVEL_NETWORK] = self.wanted
+ if query.NETQ_INST in self.requested_data:
+ lu.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+ if query.NETQ_GROUP in self.requested_data:
+ lu.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
def DeclareLocks(self, lu, level):
pass
"""Computes the list of networks and their attributes.
"""
+ all_networks = lu.cfg.GetAllNetworksInfo()
+
+ network_uuids = self._GetNames(lu, all_networks.keys(),
+ locking.LEVEL_NETWORK)
+
+ name_to_uuid = dict((n.name, n.uuid) for n in all_networks.values())
+
do_instances = query.NETQ_INST in self.requested_data
- do_groups = do_instances or (query.NETQ_GROUP in self.requested_data)
- do_stats = query.NETQ_STATS in self.requested_data
+ do_groups = query.NETQ_GROUP in self.requested_data
- network_to_groups = None
network_to_instances = None
- stats = None
+ network_to_groups = None
# For NETQ_GROUP, we need to map network->[groups]
if do_groups:
all_groups = lu.cfg.GetAllNodeGroupsInfo()
- network_to_groups = dict((uuid, []) for uuid in self.wanted)
+ network_to_groups = dict((uuid, []) for uuid in network_uuids)
+ for _, group in all_groups.iteritems():
+ for net_uuid in network_uuids:
+ netparams = group.networks.get(net_uuid, None)
+ if netparams:
+ info = (group.name, netparams[constants.NIC_MODE],
+ netparams[constants.NIC_LINK])
- if do_instances:
- all_instances = lu.cfg.GetAllInstancesInfo()
- all_nodes = lu.cfg.GetAllNodesInfo()
- network_to_instances = dict((uuid, []) for uuid in self.wanted)
-
- for group in all_groups.values():
- if do_instances:
- group_nodes = [node.name for node in all_nodes.values() if
- node.group == group.uuid]
- group_instances = [instance for instance in all_instances.values()
- if instance.primary_node in group_nodes]
-
- for net_uuid in group.networks.keys():
- if net_uuid in network_to_groups:
- netparams = group.networks[net_uuid]
- mode = netparams[constants.NIC_MODE]
- link = netparams[constants.NIC_LINK]
- info = group.name + "(" + mode + ", " + link + ")"
network_to_groups[net_uuid].append(info)
- if do_instances:
- for instance in group_instances:
- for nic in instance.nics:
- if nic.network == self._all_networks[net_uuid].name:
- network_to_instances[net_uuid].append(instance.name)
- break
-
- if do_stats:
- stats = {}
- for uuid, net in self._all_networks.items():
- if uuid in self.wanted:
- pool = network.AddressPool(net)
- stats[uuid] = {
- "free_count": pool.GetFreeCount(),
- "reserved_count": pool.GetReservedCount(),
- "map": pool.GetMap(),
- "external_reservations":
- utils.CommaJoin(pool.GetExternalReservations()),
- }
-
- return query.NetworkQueryData([self._all_networks[uuid]
- for uuid in self.wanted],
+ if do_instances:
+ all_instances = lu.cfg.GetAllInstancesInfo()
+ network_to_instances = dict((uuid, []) for uuid in network_uuids)
+ for instance in all_instances.values():
+ for nic in instance.nics:
+ if nic.network:
+ net_uuid = name_to_uuid[nic.network]
+ if net_uuid in network_uuids:
+ network_to_instances[net_uuid].append(instance.name)
+ break
+
+ if query.NETQ_STATS in self.requested_data:
+ stats = \
+ dict((uuid,
+ self._GetStats(network.AddressPool(all_networks[uuid])))
+ for uuid in network_uuids)
+ else:
+ stats = None
+
+ return query.NetworkQueryData([all_networks[uuid]
+ for uuid in network_uuids],
network_to_groups,
network_to_instances,
stats)
+ @staticmethod
+ def _GetStats(pool):
+ """Returns statistics for a network address pool.
+
+ """
+ return {
+ "free_count": pool.GetFreeCount(),
+ "reserved_count": pool.GetReservedCount(),
+ "map": pool.GetMap(),
+ "external_reservations":
+ utils.CommaJoin(pool.GetExternalReservations()),
+ }
+
class LUNetworkQuery(NoHooksLU):
"""Logical unit for querying networks.
def CheckArguments(self):
self.nq = _NetworkQuery(qlang.MakeSimpleFilter("name", self.op.names),
- self.op.output_fields, False)
+ self.op.output_fields, self.op.use_locking)
def ExpandNames(self):
self.nq.ExpandNames(self)
self.network_link = self.op.network_link
self.network_uuid = self.cfg.LookupNetwork(self.network_name)
- self.network = self.cfg.GetNetwork(self.network_uuid)
- if self.network is None:
- raise errors.OpPrereqError("Network %s does not exist" %
- self.network_name, errors.ECODE_INVAL)
+ if self.network_uuid is None:
+ raise errors.OpPrereqError("Network '%s' does not exist" %
+ self.network_name, errors.ECODE_NOENT)
self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
- self.group = self.cfg.GetNodeGroup(self.group_uuid)
- if self.group is None:
- raise errors.OpPrereqError("Group %s does not exist" %
- self.group_name, errors.ECODE_INVAL)
+ if self.group_uuid is None:
+ raise errors.OpPrereqError("Group '%s' does not exist" %
+ self.group_name, errors.ECODE_NOENT)
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
}
self.share_locks[locking.LEVEL_INSTANCE] = 1
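+ # the network object is only read (to build its address pool for the
+ # conflict check), so a shared lock is sufficient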
+ if self.op.conflicts_check:
+ self.needed_locks[locking.LEVEL_NETWORK] = [self.network_uuid]
+ self.share_locks[locking.LEVEL_NETWORK] = 1
+
def DeclareLocks(self, level):
if level == locking.LEVEL_INSTANCE:
assert not self.needed_locks[locking.LEVEL_INSTANCE]
# Lock instances optimistically, needs verification once group lock has
# been acquired
- self.needed_locks[locking.LEVEL_INSTANCE] = \
- self.cfg.GetNodeGroupInstances(self.group_uuid)
+ if self.op.conflicts_check:
+ self.needed_locks[locking.LEVEL_INSTANCE] = \
+ self.cfg.GetNodeGroupInstances(self.group_uuid)
def BuildHooksEnv(self):
ret = {
"GROUP_NETWORK_MODE": self.network_mode,
"GROUP_NETWORK_LINK": self.network_link,
}
- ret.update(_BuildNetworkHookEnvByObject(self.network))
return ret
def BuildHooksNodes(self):
return (nodes, nodes)
def CheckPrereq(self):
- l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
- for i in value)
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+
+ assert self.group_uuid in owned_groups
self.netparams = {
constants.NIC_MODE: self.network_mode,
}
objects.NIC.CheckParameterSyntax(self.netparams)
+ self.group = self.cfg.GetNodeGroup(self.group_uuid)
#if self.network_mode == constants.NIC_MODE_BRIDGED:
# _CheckNodeGroupBridgesExist(self, self.network_link, self.group_uuid)
self.connected = False
self.connected = True
return
- pool = network.AddressPool(self.network)
if self.op.conflicts_check:
- groupinstances = []
- for n in self.cfg.GetNodeGroupInstances(self.group_uuid):
- groupinstances.append(self.cfg.GetInstanceInfo(n))
- instances = [(instance.name, idx, nic.ip)
- for instance in groupinstances
- for idx, nic in enumerate(instance.nics)
- if (not nic.network and pool.Contains(nic.ip))]
- if instances:
- self.LogWarning("Following occurences use IPs from network %s"
- " that is about to connect to nodegroup %s: %s" %
- (self.network_name, self.group.name,
- l(instances)))
- raise errors.OpPrereqError("Conflicting IPs found."
- " Please remove/modify"
- " corresponding NICs",
- errors.ECODE_INVAL)
+ pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
+
+ _NetworkConflictCheck(self, lambda nic: pool.Contains(nic.ip),
+ "connect to")
def Exec(self, feedback_fn):
if self.connected:
self.cfg.Update(self.group, feedback_fn)
+def _NetworkConflictCheck(lu, check_fn, action):
+ """Checks for network interface conflicts with a network.
+
+ @type lu: L{LogicalUnit}
+ @type check_fn: callable receiving one parameter (L{objects.NIC}) and
+ returning boolean
+ @param check_fn: Function checking for conflict
+ @type action: string
+ @param action: Part of error message (see code)
+ @raise errors.OpPrereqError: If conflicting IP addresses are found.
+
+ """
+ # Check if locked instances are still correct
+ owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
+ _CheckNodeGroupInstances(lu.cfg, lu.group_uuid, owned_instances)
+
+ conflicts = []
+
+ for (_, instance) in lu.cfg.GetMultiInstanceInfo(owned_instances):
+ instconflicts = [(idx, nic.ip)
+ for (idx, nic) in enumerate(instance.nics)
+ if check_fn(nic)]
+
+ if instconflicts:
+ conflicts.append((instance.name, instconflicts))
+
+ if conflicts:
+ lu.LogWarning("IP addresses from network '%s', which is about to %s"
+ " node group '%s', are in use: %s" %
+ (lu.network_name, action, lu.group.name,
+ utils.CommaJoin(("%s: %s" %
+ (name, _FmtNetworkConflict(details)))
+ for (name, details) in conflicts)))
+
+ raise errors.OpPrereqError("Conflicting IP addresses found; "
+ " remove/modify the corresponding network"
+ " interfaces", errors.ECODE_STATE)
+
+
+def _FmtNetworkConflict(details):
+ """Utility for L{_NetworkConflictCheck}.
+
+ """
+ return utils.CommaJoin("nic%s/%s" % (idx, ipaddr)
+ for (idx, ipaddr) in details)
+
+
class LUNetworkDisconnect(LogicalUnit):
"""Disconnect a network to a nodegroup
self.group_name = self.op.group_name
self.network_uuid = self.cfg.LookupNetwork(self.network_name)
- self.network = self.cfg.GetNetwork(self.network_uuid)
- if self.network is None:
- raise errors.OpPrereqError("Network %s does not exist" %
- self.network_name, errors.ECODE_INVAL)
+ if self.network_uuid is None:
+ raise errors.OpPrereqError("Network '%s' does not exist" %
+ self.network_name, errors.ECODE_NOENT)
self.group_uuid = self.cfg.LookupNodeGroup(self.group_name)
- self.group = self.cfg.GetNodeGroup(self.group_uuid)
- if self.group is None:
- raise errors.OpPrereqError("Group %s does not exist" %
- self.group_name, errors.ECODE_INVAL)
+ if self.group_uuid is None:
+ raise errors.OpPrereqError("Group '%s' does not exist" %
+ self.group_name, errors.ECODE_NOENT)
self.needed_locks = {
locking.LEVEL_INSTANCE: [],
# Lock instances optimistically, needs verification once group lock has
# been acquired
- self.needed_locks[locking.LEVEL_INSTANCE] = \
+ if self.op.conflicts_check:
+ self.needed_locks[locking.LEVEL_INSTANCE] = \
self.cfg.GetNodeGroupInstances(self.group_uuid)
def BuildHooksEnv(self):
ret = {
"GROUP_NAME": self.group_name,
}
- ret.update(_BuildNetworkHookEnvByObject(self.network))
return ret
def BuildHooksNodes(self):
return (nodes, nodes)
def CheckPrereq(self):
- l = lambda value: utils.CommaJoin("%s: %s/%s" % (i[0], i[1], i[2])
- for i in value)
+ owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
+ assert self.group_uuid in owned_groups
+
+ self.group = self.cfg.GetNodeGroup(self.group_uuid)
self.connected = True
if self.network_uuid not in self.group.networks:
self.LogWarning("Network '%s' is not mapped to group '%s'",
return
if self.op.conflicts_check:
- groupinstances = []
- for n in self.cfg.GetNodeGroupInstances(self.group_uuid):
- groupinstances.append(self.cfg.GetInstanceInfo(n))
- instances = [(instance.name, idx, nic.ip)
- for instance in groupinstances
- for idx, nic in enumerate(instance.nics)
- if nic.network == self.network_name]
- if instances:
- self.LogWarning("Following occurences use IPs from network %s"
- " that is about to disconnected from the nodegroup"
- " %s: %s" %
- (self.network_name, self.group.name,
- l(instances)))
- raise errors.OpPrereqError("Conflicting IPs."
- " Please remove/modify"
- " corresponding NICS",
- errors.ECODE_INVAL)
+ _NetworkConflictCheck(self, lambda nic: nic.network == self.network_name,
+ "disconnect from")
def Exec(self, feedback_fn):
if not self.connected:
constants.QR_GROUP: _GroupQuery,
constants.QR_NETWORK: _NetworkQuery,
constants.QR_OS: _OsQuery,
+ constants.QR_EXTSTORAGE: _ExtStorageQuery,
constants.QR_EXPORT: _ExportQuery,
}
def _CheckForConflictingIp(lu, ip, node):
- """In case of conflicting ip raise error.
+ """In case of conflicting IP address raise error.
@type ip: string
- @param ip: ip address
+ @param ip: IP address
@type node: string
@param node: node name
"""
(conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
if conf_net is not None:
- raise errors.OpPrereqError("Conflicting IP found:"
- " %s <> %s." % (ip, conf_net),
- errors.ECODE_INVAL)
+ raise errors.OpPrereqError(("Conflicting IP address found: '%s' != '%s'" %
+ (ip, conf_net)),
+ errors.ECODE_STATE)
return (None, None)