Revision fab9573b lib/cmdlib.py
```diff
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -4492,8 +4492,7 @@
 
   def ExpandNames(self, lu):
     lu.needed_locks = {}
-    lu.share_locks[locking.LEVEL_INSTANCE] = 1
-    lu.share_locks[locking.LEVEL_NODE] = 1
+    lu.share_locks = _ShareAll()
 
     if self.names:
       self.wanted = _GetWantedInstances(lu, self.names)
@@ -4504,17 +4503,54 @@
                        query.IQ_LIVE in self.requested_data)
     if self.do_locking:
       lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
+      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
       lu.needed_locks[locking.LEVEL_NODE] = []
       lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
 
+    self.do_grouplocks = (self.do_locking and
+                          query.IQ_NODES in self.requested_data)
+
   def DeclareLocks(self, lu, level):
-    if level == locking.LEVEL_NODE and self.do_locking:
-      lu._LockInstancesNodes() # pylint: disable-msg=W0212
+    if self.do_locking:
+      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
+        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]
+
+        # Lock all groups used by instances optimistically; this requires going
+        # via the node before it's locked, requiring verification later on
+        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
+          set(group_uuid
+              for instance_name in
+                lu.glm.list_owned(locking.LEVEL_INSTANCE)
+              for group_uuid in
+                lu.cfg.GetInstanceNodeGroups(instance_name))
+      elif level == locking.LEVEL_NODE:
+        lu._LockInstancesNodes() # pylint: disable-msg=W0212
+
+  @staticmethod
+  def _CheckGroupLocks(lu):
+    owned_instances = frozenset(lu.glm.list_owned(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(lu.glm.list_owned(locking.LEVEL_NODEGROUP))
+
+    # Check if node groups for locked instances are still correct
+    for instance_name in owned_instances:
+      inst_groups = lu.cfg.GetInstanceNodeGroups(instance_name)
+      if not owned_groups.issuperset(inst_groups):
+        raise errors.OpPrereqError("Instance %s's node groups changed since"
+                                   " locks were acquired, current groups are"
+                                   " are '%s', owning groups '%s'; retry the"
+                                   " operation" %
+                                   (instance_name,
+                                    utils.CommaJoin(inst_groups),
+                                    utils.CommaJoin(owned_groups)),
+                                   errors.ECODE_STATE)
 
   def _GetQueryData(self, lu):
     """Computes the list of instances and their attributes.
 
     """
+    if self.do_grouplocks:
+      self._CheckGroupLocks(lu)
+
     cluster = lu.cfg.GetClusterInfo()
     all_info = lu.cfg.GetAllInstancesInfo()
 
@@ -4577,9 +4613,21 @@
     else:
       consinfo = None
 
+    if query.IQ_NODES in self.requested_data:
+      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
+                                            instance_list)))
+      nodes = dict((name, lu.cfg.GetNodeInfo(name)) for name in node_names)
+      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
+                    for uuid in set(map(operator.attrgetter("group"),
+                                        nodes.values())))
+    else:
+      nodes = None
+      groups = None
+
     return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                    disk_usage, offline_nodes, bad_nodes,
-                                   live_data, wrongnode_inst, consinfo)
+                                   live_data, wrongnode_inst, consinfo,
+                                   nodes, groups)
 
 
 class LUQuery(NoHooksLU):
```
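The core of this revision is the "lock optimistically, then re-verify" pattern: `DeclareLocks` takes node-group locks based on the instances currently owned, and `_CheckGroupLocks` re-checks that ownership once the locks are held, asking the caller to retry if an instance changed groups in the meantime. The following is a minimal standalone sketch of that pattern; `cfg`, `StaleLocksError` and the helper functions are invented stand-ins for illustration, not Ganeti's real API.

```python
# Illustrative sketch only: cfg maps instance name -> group UUIDs of its nodes
# (a toy stand-in for lu.cfg.GetInstanceNodeGroups).

class StaleLocksError(Exception):
  """Raised when optimistically chosen locks no longer match the config."""


def groups_for_instances(cfg, instance_names):
  """Union of the group UUIDs used by the given instances."""
  return set(group_uuid
             for name in instance_names
             for group_uuid in cfg[name])


def verify_group_locks(cfg, owned_instances, owned_groups):
  """After locking, check that the owned groups still cover every instance."""
  for name in owned_instances:
    inst_groups = cfg[name]
    if not owned_groups.issuperset(inst_groups):
      raise StaleLocksError("instance %s now uses groups %s, but only %s are"
                            " locked; retry the operation"
                            % (name, sorted(inst_groups),
                               sorted(owned_groups)))


cfg = {"inst1": {"group-a"}, "inst2": {"group-a", "group-b"}}
owned_instances = ["inst1", "inst2"]

# Step 1: pick group locks based on the configuration as seen right now
owned_groups = frozenset(groups_for_instances(cfg, owned_instances))

# ... the group and node locks would be acquired here; the configuration can
# still change in the meantime, e.g. an instance moving to another group
cfg["inst2"] = {"group-a", "group-c"}

# Step 2: re-check ownership once the locks are held
try:
  verify_group_locks(cfg, owned_instances, owned_groups)
except StaleLocksError as err:
  print("stale locks detected: %s" % err)
```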
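The new `IQ_NODES` branch in `_GetQueryData` uses an `itertools.chain`/`operator.attrgetter` idiom to collect node and group information once per distinct name. A self-contained sketch of that idiom, using toy namedtuples in place of the real configuration objects (the `Instance`/`Node` types and the sample data are made up for illustration):

```python
import itertools
import operator
from collections import namedtuple

Instance = namedtuple("Instance", ["name", "all_nodes"])
Node = namedtuple("Node", ["name", "group"])

# Toy stand-ins for the objects returned by lu.cfg in the diff above
instance_list = [Instance("inst1", ["node1", "node2"]),
                 Instance("inst2", ["node2", "node3"])]
node_info = {"node1": Node("node1", "uuid-a"),
             "node2": Node("node2", "uuid-a"),
             "node3": Node("node3", "uuid-b")}

# Same chain/attrgetter idiom as the IQ_NODES branch: the union of every
# instance's node names, flattened without explicit nested loops
node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
                                      instance_list)))
nodes = dict((name, node_info[name]) for name in node_names)

# Look up each referenced node group exactly once
groups = dict((uuid, "group object for %s" % uuid)
              for uuid in set(map(operator.attrgetter("group"),
                                  nodes.values())))

print(sorted(node_names))  # ['node1', 'node2', 'node3']
print(sorted(groups))      # ['uuid-a', 'uuid-b']
```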