self.proc = processor
self.op = op
self.cfg = context.cfg
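+ # Keep a direct reference to the lock manager; lock ownership is now
+ # queried through self.glm instead of being tracked per-LU in the
+ # acquired_locks dict removed below.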
+ self.glm = context.glm
self.context = context
self.rpc = rpc
# Dicts used to declare locking needs to mcpu
self.needed_locks = None
- self.acquired_locks = {}
self.share_locks = dict.fromkeys(locking.LEVELS, 0)
self.add_locks = {}
self.remove_locks = {}
# In the future we might want to have different behaviors depending on the value
# of self.recalculate_locks[locking.LEVEL_NODE]
wanted_nodes = []
- for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
+ for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
instance = self.context.cfg.GetInstanceInfo(instance_name)
wanted_nodes.append(instance.primary_node)
if not primary_only:
"""
if self.do_locking:
- names = lu.acquired_locks[lock_level]
+ names = lu.glm.list_owned(lock_level)
else:
names = all_names
# caller specified names and we must keep the same order
assert self.names
- assert not self.do_locking or lu.acquired_locks[lock_level]
+ assert not self.do_locking or lu.glm.is_owned(lock_level)
missing = set(self.wanted).difference(names)
if missing:
release = []
retain = []
# Determine which locks to release
- for name in lu.acquired_locks[level]:
+ for name in lu.glm.list_owned(level):
if should_release(name):
release.append(name)
else:
retain.append(name)
- assert len(lu.acquired_locks[level]) == (len(retain) + len(release))
+ assert len(lu.glm.list_owned(level)) == (len(retain) + len(release))
# Release just some locks
- lu.context.glm.release(level, names=release)
- lu.acquired_locks[level] = retain
+ lu.glm.release(level, names=release)
- assert frozenset(lu.context.glm.list_owned(level)) == frozenset(retain)
+ assert frozenset(lu.glm.list_owned(level)) == frozenset(retain)
else:
# Release everything
- lu.context.glm.release(level)
- del lu.acquired_locks[level]
+ lu.glm.release(level)
- assert not lu.context.glm.list_owned(level), "No locks should be owned"
+ assert not lu.glm.is_owned(level), "No locks should be owned"
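+ # Usage sketch (taken from call sites later in this patch): release
+ # all but a kept subset with e.g.
+ #   _ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
+ # or drop a whole level with
+ #   _ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)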
def _RunPostHook(lu, node_name):
def ExpandNames(self):
if self.op.instances:
- self.wanted_names = []
- for name in self.op.instances:
- full_name = _ExpandInstanceName(self.cfg, name)
- self.wanted_names.append(full_name)
+ self.wanted_names = _GetWantedInstances(self, self.op.instances)
self.needed_locks = {
  locking.LEVEL_NODE: [],
  locking.LEVEL_INSTANCE: self.wanted_names,
  }
else:
  self.wanted_names = None
  self.needed_locks = {
    locking.LEVEL_NODE: locking.ALL_SET,
    locking.LEVEL_INSTANCE: locking.ALL_SET,
    }
- self.share_locks = dict(((i, 1) for i in locking.LEVELS))
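+ # A value of 1 requests shared (read-only) acquisition at every
+ # level; 0, the default used elsewhere, would mean exclusive locks.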
+ self.share_locks = dict.fromkeys(locking.LEVELS, 1)
def DeclareLocks(self, level):
if level == locking.LEVEL_NODE and self.wanted_names is not None:
"""
if self.wanted_names is None:
- self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+ self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
self.wanted_instances = [self.cfg.GetInstanceInfo(name) for name
in self.wanted_names]
" drbd-based instances exist",
errors.ECODE_INVAL)
- node_list = self.acquired_locks[locking.LEVEL_NODE]
+ node_list = self.glm.list_owned(locking.LEVEL_NODE)
# if vg_name not None, checks given volume group on all nodes
if self.op.vg_name:
REG_BGL = False
_SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)
+ def ExpandNames(self):
+ """Gather locks we need.
+
+ """
+ if self.op.node_names:
+ self.op.node_names = _GetWantedNodes(self, self.op.node_names)
+ lock_names = self.op.node_names
+ else:
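+ # ALL_SET acquires every lock at this level, i.e. all nodes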
+ lock_names = locking.ALL_SET
+
+ self.needed_locks = {
+ locking.LEVEL_NODE: lock_names,
+ }
+
def CheckPrereq(self):
"""Check prerequisites.
" not marked offline") % node_name,
errors.ECODE_STATE)
- def ExpandNames(self):
- """Gather locks we need.
-
- """
- if self.op.node_names:
- self.op.node_names = [_ExpandNodeName(self.cfg, name)
- for name in self.op.node_names]
- lock_names = self.op.node_names
- else:
- lock_names = locking.ALL_SET
-
- self.needed_locks = {
- locking.LEVEL_NODE: lock_names,
- }
-
def Exec(self, feedback_fn):
"""Execute OOB and return result if we expect any.
master_node = self.master_node
ret = []
- for idx, node in enumerate(self.nodes):
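+ # Sort nodes by name so OOB results are returned in a stable,
+ # human-friendly order.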
+ for idx, node in enumerate(utils.NiceSort(self.nodes,
+ key=lambda node: node.name)):
node_entry = [(constants.RS_NORMAL, node.name)]
ret.append(node_entry)
"""
# Locking is not used
- assert not (lu.acquired_locks or self.do_locking or self.use_locking)
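+ # The cluster-level lock (the BGL) may legitimately still be owned,
+ # hence LEVEL_CLUSTER is excluded from the ownership check.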
+ assert not (compat.any(lu.glm.is_owned(level)
+ for level in locking.LEVELS
+ if level != locking.LEVEL_CLUSTER) or
+ self.do_locking or self.use_locking)
valid_nodes = [node.name
for node in lu.cfg.GetAllNodesInfo().values()
"""Computes the list of nodes and their attributes.
"""
- nodenames = self.acquired_locks[locking.LEVEL_NODE]
+ nodenames = self.glm.list_owned(locking.LEVEL_NODE)
volumes = self.rpc.call_node_volumes(nodenames)
ilist = [self.cfg.GetInstanceInfo(iname) for iname
"""Computes the list of nodes and their attributes.
"""
- self.nodes = self.acquired_locks[locking.LEVEL_NODE]
+ self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
# Always get name to sort by
if constants.SF_NAME in self.op.output_fields:
instances_keep = []
# Build list of instances to release
- for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
+ for instance_name in self.glm.list_owned(locking.LEVEL_INSTANCE):
instance = self.context.cfg.GetInstanceInfo(instance_name)
if (instance.disk_template in constants.DTS_INT_MIRROR and
self.op.node_name in instance.all_nodes):
_ReleaseLocks(self, locking.LEVEL_INSTANCE, keep=instances_keep)
- assert (set(self.acquired_locks.get(locking.LEVEL_INSTANCE, [])) ==
+ assert (set(self.glm.list_owned(locking.LEVEL_INSTANCE)) ==
set(instances_keep))
def BuildHooksEnv(self):
# Change the instance lock. This is definitely safe while we hold the BGL.
# Otherwise the new lock would have to be added in acquired mode.
assert self.REQ_BGL
- self.context.glm.remove(locking.LEVEL_INSTANCE, old_name)
- self.context.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
+ self.glm.remove(locking.LEVEL_INSTANCE, old_name)
+ self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
# re-read the instance from the configuration after rename
inst = self.cfg.GetInstanceInfo(self.op.new_name)
if len(self.lu.tasklets) == 1:
# It is safe to release locks only when we're the only tasklet
# in the LU
- _ReleaseLocks(self, locking.LEVEL_NODE,
+ _ReleaseLocks(self.lu, locking.LEVEL_NODE,
keep=[instance.primary_node, self.target_node])
else:
src_path = self.op.src_path
if src_node is None:
- locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
+ locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
exp_list = self.rpc.call_export_list(locked_nodes)
found = False
for node in exp_list:
# Lock member nodes of all locked groups
self.needed_locks[locking.LEVEL_NODE] = [node_name
- for group_uuid in self.acquired_locks[locking.LEVEL_NODEGROUP]
+ for group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
for node_name in self.cfg.GetNodeGroup(group_uuid).members]
else:
self._LockInstancesNodes()
"""Check prerequisites.
"""
- assert (locking.LEVEL_NODEGROUP in self.acquired_locks or
+ assert (self.glm.is_owned(locking.LEVEL_NODEGROUP) or
self.op.iallocator is None)
- if locking.LEVEL_NODEGROUP in self.acquired_locks:
+ owned_groups = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+ if owned_groups:
groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
- prevgroups = self.acquired_locks[locking.LEVEL_NODEGROUP]
- if prevgroups != groups:
+ if owned_groups != groups:
raise errors.OpExecError("Node groups used by instance '%s' changed"
" since lock was acquired, current list is %r,"
" used to be '%s'" %
(self.op.instance_name,
utils.CommaJoin(groups),
- utils.CommaJoin(prevgroups)))
+ utils.CommaJoin(owned_groups)))
return LogicalUnit.CheckPrereq(self)
if remote_node is None:
self.remote_node_info = None
else:
- assert remote_node in self.lu.acquired_locks[locking.LEVEL_NODE], \
+ assert remote_node in self.lu.glm.list_owned(locking.LEVEL_NODE), \
"Remote node '%s' is not locked" % remote_node
self.remote_node_info = self.cfg.GetNodeInfo(remote_node)
_ReleaseLocks(self.lu, locking.LEVEL_NODE, keep=touched_nodes)
# Release any owned node group
- if self.lu.context.glm.is_owned(locking.LEVEL_NODEGROUP):
+ if self.lu.glm.is_owned(locking.LEVEL_NODEGROUP):
_ReleaseLocks(self.lu, locking.LEVEL_NODEGROUP)
# Check whether disks are valid
if __debug__:
# Verify owned locks before starting operation
- owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
+ owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
assert set(owned_locks) == set(self.node_secondary_ip), \
("Incorrect node locks, owning %s, expected %s" %
(owned_locks, self.node_secondary_ip.keys()))
- owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_INSTANCE)
+ owned_locks = self.lu.glm.list_owned(locking.LEVEL_INSTANCE)
assert list(owned_locks) == [self.instance_name], \
"Instance '%s' not locked" % self.instance_name
- assert not self.lu.context.glm.is_owned(locking.LEVEL_NODEGROUP), \
+ assert not self.lu.glm.is_owned(locking.LEVEL_NODEGROUP), \
"Should not own any node group lock at this point"
if not self.disks:
if __debug__:
# Verify owned locks
- owned_locks = self.lu.context.glm.list_owned(locking.LEVEL_NODE)
+ owned_locks = self.lu.glm.list_owned(locking.LEVEL_NODE)
nodes = frozenset(self.node_secondary_ip)
assert ((self.early_release and not owned_locks) or
(not self.early_release and not (set(owned_locks) - nodes))), \
if not disks_ok:
raise errors.OpExecError("Cannot activate block device to grow")
+ # First run all grow ops in dry-run mode
+ for node in instance.all_nodes:
+ self.cfg.SetDiskID(disk, node)
+ result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, True)
+ result.Raise("Grow request failed to node %s" % node)
+
+ # We know that (as far as we can test) operations across different
+ # nodes will succeed, time to run it for real
for node in instance.all_nodes:
self.cfg.SetDiskID(disk, node)
- result = self.rpc.call_blockdev_grow(node, disk, self.op.amount)
+ result = self.rpc.call_blockdev_grow(node, disk, self.op.amount, False)
result.Raise("Grow request failed to node %s" % node)
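+ # Two-phase approach: if the dry-run fails on any node, we abort
+ # before a single device has actually been grown, so the disk cannot
+ # end up with mismatched sizes across nodes.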
# TODO: Rewrite code to work properly
"""
if self.wanted_names is None:
assert self.op.use_locking, "Locking was not used"
- self.wanted_names = self.acquired_locks[locking.LEVEL_INSTANCE]
+ self.wanted_names = self.glm.list_owned(locking.LEVEL_INSTANCE)
self.wanted_instances = [self.cfg.GetInstanceInfo(name)
for name in self.wanted_names]
that node.
"""
- self.nodes = self.acquired_locks[locking.LEVEL_NODE]
+ self.nodes = self.glm.list_owned(locking.LEVEL_NODE)
rpcresult = self.rpc.call_export_list(self.nodes)
result = {}
for node in rpcresult:
fqdn_warn = True
instance_name = self.op.instance_name
- locked_nodes = self.acquired_locks[locking.LEVEL_NODE]
+ locked_nodes = self.glm.list_owned(locking.LEVEL_NODE)
exportlist = self.rpc.call_export_list(locked_nodes)
found = False
for node in exportlist: