osname, node)
-def _CreateInstanceAllocRequest(op, disks, nics, beparams):
+def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
"""Wrapper around IAReqInstanceAlloc.
@param op: The instance opcode
@param disks: The computed disks
@param nics: The computed nics
@param beparams: The fully filled beparams
+ @param node_whitelist: List of nodes which should appear as online to the
+ allocator (unless the node is already marked offline)
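+ (C{None} means no restriction; a whitelist such as C{["node1", "node2"]},
+ names chosen purely for illustration, makes all other nodes appear
+ offline to the allocator)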
@returns: A filled L{iallocator.IAReqInstanceAlloc}
spindle_use=spindle_use,
disks=disks,
nics=[n.ToDict() for n in nics],
- hypervisor=op.hypervisor)
+ hypervisor=op.hypervisor,
+ node_whitelist=node_whitelist)
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
# that group
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
+
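+ # With opportunistic locking, node locks already held by other opcodes
+ # are skipped rather than waited for; the allocator is later restricted
+ # to the node locks actually acquired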
+ if self.op.opportunistic_locking:
+ self.opportunistic_locks[locking.LEVEL_NODE] = True
+ self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
else:
self.op.pnode = _ExpandNodeName(self.cfg, self.op.pnode)
nodelist = [self.op.pnode]
"""Run the allocator based on input opcode.
"""
+ if self.op.opportunistic_locking:
+ # Only consider nodes for which a lock is held
+ node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+ else:
+ node_whitelist = None
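+ # (If no node locks could be acquired at all, the whitelist is empty
+ # and the allocation below is expected to fail with the temporary
+ # error code)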
+
# TODO: Export network to iallocator so that it chooses a pnode
# in a nodegroup that has the desired network connected to it
req = _CreateInstanceAllocRequest(self.op, self.disks,
- self.nics, self.be_full)
+ self.nics, self.be_full,
+ node_whitelist)
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
ial.Run(self.op.iallocator)
if not ial.success:
+ # When opportunistic locks are used, only a temporary failure is generated
+ if self.op.opportunistic_locking:
+ ecode = errors.ECODE_TEMP_NORES
+ else:
+ ecode = errors.ECODE_NORES
+
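+ # ECODE_TEMP_NORES marks the failure as temporary, telling the client
+ # the request can simply be retried later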
raise errors.OpPrereqError("Can't compute nodes using"
" iallocator '%s': %s" %
(self.op.iallocator, ial.info),
- errors.ECODE_NORES)
+ ecode)
+
self.op.pnode = ial.result[0]
self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
self.op.instance_name, self.op.iallocator,
if self.op.iallocator:
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
+
+ if self.op.opportunistic_locking:
+ self.opportunistic_locks[locking.LEVEL_NODE] = True
+ self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
else:
nodeslist = []
for inst in self.op.instances:
default_vg = self.cfg.GetVGName()
ec_id = self.proc.GetECId()
+ if self.op.opportunistic_locking:
+ # Only consider nodes for which a lock is held
+ node_whitelist = self.owned_locks(locking.LEVEL_NODE)
+ else:
+ node_whitelist = None
+
insts = [_CreateInstanceAllocRequest(op, _ComputeDisks(op, default_vg),
_ComputeNics(op, cluster, None,
self.cfg, ec_id),
- _ComputeFullBeParams(op, cluster))
+ _ComputeFullBeParams(op, cluster),
+ node_whitelist)
for op in self.op.instances]
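+ # All requests share the same node whitelist and are submitted to the
+ # allocator together as a single multi-allocation request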
req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
locking.LEVEL_NODE: self.wanted,
}
+ if not self.names:
+ lu.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
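+ # A query without explicit names locks all nodes; as with other LUs
+ # holding all node locks, allocations should be stopped meanwhile,
+ # hence the node allocation lock is acquired as well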
+
def DeclareLocks(self, lu, level):
pass
# - removing the removal operation altogether
self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ # Allocations should be stopped while this LU runs with node locks, but
+ # the node allocation lock does not have to be held exclusively
+ self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
+ self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
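+ # (A value of 1 in share_locks requests the node allocation lock in
+ # shared mode: other shared holders may run concurrently, only an
+ # exclusive acquisition is blocked)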
+
def DeclareLocks(self, level):
"""Last minute lock declaration."""
# All nodes are locked anyway, so nothing to do here.
REQ_BGL = False
def ExpandNames(self):
- self.needed_locks = {}
- # We need all nodes to be locked in order for RemoveExport to work, but we
- # don't need to lock the instance itself, as nothing will happen to it (and
- # we can remove exports also for a removed instance)
- self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
+ self.needed_locks = {
+ # We need all nodes to be locked in order for RemoveExport to work, but
+ # we don't need to lock the instance itself, as nothing will happen to it
+ # (and we can also remove exports for a removed instance)
+ locking.LEVEL_NODE: locking.ALL_SET,
+
+ # Removing backups is quick, so blocking allocations is justified
+ locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
+ }
+
+ # Allocations should be stopped while this LU runs with node locks, but
+ # the node allocation lock does not have to be held exclusively
+ self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
def Exec(self, feedback_fn):
"""Remove any export.