Revision da4a52a3 lib/cmdlib/node.py
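This revision is part of the switch from name-keyed to UUID-keyed instance configuration data: `GetAllInstancesInfo` and `GetInstancesInfoByFilter` now return dicts keyed by instance UUID, so code in `lib/cmdlib/node.py` that used the dict keys as instance names must either resolve names from the instance objects or, in the query layer, work with UUIDs plus an explicit UUID-to-name map. The hunks below are annotated with short Python sketches; all sketch data, helper names, and values are illustrative stand-ins, not Ganeti APIs.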
```diff
--- a/lib/cmdlib/node.py
+++ b/lib/cmdlib/node.py
@@ -476,7 +476,8 @@
 
     if self.lock_instances:
       self.needed_locks[locking.LEVEL_INSTANCE] = \
-        frozenset(self.cfg.GetInstancesInfoByFilter(self._InstanceFilter))
+        self.cfg.GetInstanceNames(
+          self.cfg.GetInstancesInfoByFilter(self._InstanceFilter).keys())
 
   def BuildHooksEnv(self):
     """Build hooks env.
```
```diff
@@ -512,16 +513,17 @@
       self.cfg.GetInstancesInfoByFilter(self._InstanceFilter)
 
     # Verify instance locks
-    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
-    wanted_instances = frozenset(affected_instances.keys())
-    if wanted_instances - owned_instances:
+    owned_instance_names = self.owned_locks(locking.LEVEL_INSTANCE)
+    wanted_instance_names = frozenset([inst.name for inst in
+                                       affected_instances.values()])
+    if wanted_instance_names - owned_instance_names:
       raise errors.OpPrereqError("Instances affected by changing node %s's"
                                  " secondary IP address have changed since"
                                  " locks were acquired, wanted '%s', have"
                                  " '%s'; retry the operation" %
                                  (node.name,
-                                  utils.CommaJoin(wanted_instances),
-                                  utils.CommaJoin(owned_instances)),
+                                  utils.CommaJoin(wanted_instance_names),
+                                  utils.CommaJoin(owned_instance_names)),
                                  errors.ECODE_STATE)
     else:
       affected_instances = None
```
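`CheckPrereq` re-reads the affected instances after the locks are held and compares the two sets; since the locks remain name-based while the dict is now UUID-keyed, the "wanted" set must be built from the values' `.name` fields. A self-contained sketch of this optimistic re-check pattern (stand-in class and data, plain `RuntimeError` in place of `errors.OpPrereqError`):

```python
class Inst(object):
  # Stand-in with just the fields the check touches.
  def __init__(self, uuid, name):
    self.uuid = uuid
    self.name = name

# UUID-keyed dict, as returned by the filtered config lookup.
affected_instances = {"u1": Inst("u1", "web1"), "u2": Inst("u2", "db1")}
owned_instance_names = frozenset(["web1"])   # locks actually held

wanted_instance_names = frozenset(inst.name
                                  for inst in affected_instances.values())
if wanted_instance_names - owned_instance_names:
  # Mirrors the failure path; the LU raises errors.OpPrereqError
  # with errors.ECODE_STATE here.
  raise RuntimeError("instances changed since locks were acquired,"
                     " wanted '%s', have '%s'; retry the operation" %
                     (", ".join(sorted(wanted_instance_names)),
                      ", ".join(sorted(owned_instance_names))))
```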
```diff
@@ -658,14 +660,15 @@
                                  " passed, and the target node is the"
                                  " master", errors.ECODE_INVAL)
 
-    assert not (frozenset(affected_instances) -
+    assert not (set([inst.name for inst in affected_instances.values()]) -
                 self.owned_locks(locking.LEVEL_INSTANCE))
 
     if node.offline:
       if affected_instances:
         msg = ("Cannot change secondary IP address: offline node has"
                " instances (%s) configured to use it" %
-               utils.CommaJoin(affected_instances.keys()))
+               utils.CommaJoin(
+                 [inst.name for inst in affected_instances.values()]))
         raise errors.OpPrereqError(msg, errors.ECODE_STATE)
     else:
       # On online nodes, check that no instances are running, and that
```
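Both changes in this hunk exist because iterating a dict (or feeding it to `frozenset`) yields its keys: once the keys became UUIDs, the old `frozenset(affected_instances)` and `affected_instances.keys()` silently produced UUIDs where names were needed. A tiny demonstration with a plain dict:

```python
affected_instances = {"6b12f33e": "<instance object>"}

# frozenset over a dict takes the keys -- now UUIDs, not names:
print(frozenset(affected_instances))      # frozenset({'6b12f33e'})
print(list(affected_instances.keys()))    # ['6b12f33e']
```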
```diff
@@ -931,7 +934,7 @@
 
   def CheckPrereq(self):
     # Verify locks
-    owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
+    owned_instance_names = self.owned_locks(locking.LEVEL_INSTANCE)
     owned_nodes = self.owned_locks(locking.LEVEL_NODE)
     owned_groups = self.owned_locks(locking.LEVEL_NODEGROUP)
 
```
```diff
@@ -959,13 +962,13 @@
     self.instances = self._DetermineInstances()
     self.instance_names = [i.name for i in self.instances]
 
-    if set(self.instance_names) != owned_instances:
+    if set(self.instance_names) != owned_instance_names:
       raise errors.OpExecError("Instances on node '%s' changed since locks"
                                " were acquired, current instances are '%s',"
                                " used to be '%s'; retry the operation" %
                                (self.op.node_name,
                                 utils.CommaJoin(self.instance_names),
-                                utils.CommaJoin(owned_instances)))
+                                utils.CommaJoin(owned_instance_names)))
 
     if self.instance_names:
       self.LogInfo("Evacuating instances from node '%s': %s",
```
```diff
@@ -1206,16 +1209,19 @@
       node_to_secondary = dict([(uuid, set()) for uuid in node_uuids])
 
       inst_data = lu.cfg.GetAllInstancesInfo()
+      inst_uuid_to_inst_name = {}
 
       for inst in inst_data.values():
+        inst_uuid_to_inst_name[inst.uuid] = inst.name
         if inst.primary_node in node_to_primary:
-          node_to_primary[inst.primary_node].add(inst.name)
+          node_to_primary[inst.primary_node].add(inst.uuid)
         for secnode in inst.secondary_nodes:
           if secnode in node_to_secondary:
-            node_to_secondary[secnode].add(inst.name)
+            node_to_secondary[secnode].add(inst.uuid)
     else:
       node_to_primary = None
       node_to_secondary = None
+      inst_uuid_to_inst_name = None
 
     if query.NQ_OOB in self.requested_data:
       oob_support = dict((uuid, bool(SupportsOob(lu.cfg, node)))
```
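The node query now collects instance UUIDs per node and carries a separate `inst_uuid_to_inst_name` map so the presentation layer can still render names. A runnable sketch of the three maps, with stand-in types and data:

```python
class Inst(object):
  # Stand-in carrying only the fields this loop reads.
  def __init__(self, uuid, name, primary_node, secondary_nodes):
    self.uuid = uuid
    self.name = name
    self.primary_node = primary_node
    self.secondary_nodes = secondary_nodes

node_uuids = ["node-A", "node-B"]
instances = [
  Inst("u1", "web1", "node-A", ["node-B"]),
  Inst("u2", "db1", "node-B", []),
]

node_to_primary = dict((n, set()) for n in node_uuids)
node_to_secondary = dict((n, set()) for n in node_uuids)
inst_uuid_to_inst_name = {}

for inst in instances:
  inst_uuid_to_inst_name[inst.uuid] = inst.name
  if inst.primary_node in node_to_primary:
    node_to_primary[inst.primary_node].add(inst.uuid)
  for secnode in inst.secondary_nodes:
    if secnode in node_to_secondary:
      node_to_secondary[secnode].add(inst.uuid)

print(node_to_primary)         # {'node-A': {'u1'}, 'node-B': {'u2'}}
print(node_to_secondary)       # {'node-A': set(), 'node-B': {'u1'}}
print(inst_uuid_to_inst_name)  # {'u1': 'web1', 'u2': 'db1'}
```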
```diff
@@ -1230,8 +1236,9 @@
 
     return query.NodeQueryData([all_info[uuid] for uuid in node_uuids],
                                live_data, lu.cfg.GetMasterNode(),
-                               node_to_primary, node_to_secondary, groups,
-                               oob_support, lu.cfg.GetClusterInfo())
+                               node_to_primary, node_to_secondary,
+                               inst_uuid_to_inst_name, groups, oob_support,
+                               lu.cfg.GetClusterInfo())
 
 
 class LUNodeQuery(NoHooksLU):
```
```diff
@@ -1487,10 +1494,10 @@
       raise errors.OpPrereqError("Node is the master node, failover to another"
                                  " node is required", errors.ECODE_INVAL)
 
-    for instance_name, instance in self.cfg.GetAllInstancesInfo().items():
+    for _, instance in self.cfg.GetAllInstancesInfo().items():
       if node.uuid in instance.all_nodes:
         raise errors.OpPrereqError("Instance %s is still running on the node,"
-                                   " please remove first" % instance_name,
+                                   " please remove first" % instance.name,
                                    errors.ECODE_INVAL)
     self.op.node_name = node.name
     self.node = node
```
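A subtle consequence of the UUID-keyed dict: the loop variable `instance_name` was really a dict key, so after the migration the error message would have printed a UUID. Discarding the key and using `instance.name` keeps the message human-readable. Stand-in demonstration:

```python
class Inst(object):
  def __init__(self, uuid, name):
    self.uuid = uuid
    self.name = name

all_instances = {"6b12f33e": Inst("6b12f33e", "web1.example.com")}

for key, instance in all_instances.items():
  print("dict key (now a UUID): %s" % key)            # 6b12f33e
  print("display name:          %s" % instance.name)  # web1.example.com
```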