Revision da4a52a3 lib/cmdlib/group.py
--- a/lib/cmdlib/group.py
+++ b/lib/cmdlib/group.py
@@ -203,7 +203,8 @@
         self.node_data, instance_data)
 
     if new_splits:
-      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))
+      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
+        self.cfg.GetInstanceNames(new_splits)))
 
       if not self.op.force:
         raise errors.OpExecError("The following instances get split by this"
@@ -216,7 +217,8 @@
         if previous_splits:
           self.LogWarning("In addition, these already-split instances continue"
                           " to be split across groups: %s",
-                          utils.CommaJoin(utils.NiceSort(previous_splits)))
+                          utils.CommaJoin(utils.NiceSort(
+                            self.cfg.GetInstanceNames(previous_splits))))
 
   def Exec(self, feedback_fn):
     """Assign nodes to a new group.
@@ -262,11 +264,11 @@
 
       if len(set(node_data[node_uuid].group
                  for node_uuid in inst.all_nodes)) > 1:
-        previously_split_instances.add(inst.name)
+        previously_split_instances.add(inst.uuid)
 
       if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                  for node_uuid in inst.all_nodes)) > 1:
-        all_split_instances.add(inst.name)
+        all_split_instances.add(inst.uuid)
 
     return (list(all_split_instances - previously_split_instances),
             list(previously_split_instances & all_split_instances))
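The hunk above switches the split-instance bookkeeping from instance names to instance UUIDs; the check itself is unchanged: an instance counts as split when the node groups of its nodes are not all the same. Below is a minimal, self-contained sketch of that set-based check. The Node/Instance classes and the shape of the changes argument are illustrative stand-ins only, not Ganeti's real objects.

# Illustrative stand-ins only; Ganeti's real node/instance objects differ.
class Node:
  def __init__(self, uuid, group):
    self.uuid = uuid
    self.group = group

class Instance:
  def __init__(self, uuid, all_nodes):
    self.uuid = uuid
    self.all_nodes = all_nodes   # list of node UUIDs

def check_split_instances(changes, node_data, instance_data):
  # changes: (node_uuid, new_group_uuid) pairs; keep only real moves
  changed_nodes = dict((uuid, group) for uuid, group in changes
                       if node_data[uuid].group != group)
  all_split = set()
  previously_split = set()
  for inst in instance_data.values():
    # Already split: its nodes currently sit in more than one group
    if len(set(node_data[n].group for n in inst.all_nodes)) > 1:
      previously_split.add(inst.uuid)
    # Split after the change: evaluate groups with the pending moves applied
    if len(set(changed_nodes.get(n, node_data[n].group)
               for n in inst.all_nodes)) > 1:
      all_split.add(inst.uuid)
  return (list(all_split - previously_split),
          list(previously_split & all_split))

nodes = {"n1": Node("n1", "group-A"), "n2": Node("n2", "group-B")}
insts = {"i1": Instance("i1", ["n1", "n2"])}
# i1 already spans two groups, so it is reported as previously split:
assert check_split_instances([], nodes, insts) == ([], ["i1"])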
@@ -339,7 +341,7 @@
         for instance in all_instances.values():
           node = instance.primary_node
           if node in node_to_group:
-            group_to_instances[node_to_group[node]].append(instance.name)
+            group_to_instances[node_to_group[node]].append(instance.uuid)
 
         if not do_nodes:
           # Do not pass on node information if it was not requested.
@@ -412,7 +414,8 @@
       # Lock instances optimistically, needs verification once group lock has
       # been acquired
       self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+        self.cfg.GetInstanceNames(
+          self.cfg.GetNodeGroupInstances(self.group_uuid))
 
   @staticmethod
   def _UpdateAndVerifyDiskParams(old, new):
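Several hunks in this revision follow the same pattern: instance locks are keyed by instance name, while the group membership lookup now yields instance UUIDs, so the UUID list is passed through GetInstanceNames() before it is stored as the lock list. A minimal runnable sketch with an assumed stub config (not Ganeti's real ConfigWriter; only the two calls used in the diff are modelled):

# Assumed stub for illustration only.
class FakeConfig:
  def __init__(self, names_by_uuid, members_by_group):
    self._names_by_uuid = names_by_uuid        # instance uuid -> instance name
    self._members_by_group = members_by_group  # group uuid -> [instance uuids]

  def GetNodeGroupInstances(self, group_uuid):
    return list(self._members_by_group.get(group_uuid, []))

  def GetInstanceNames(self, inst_uuids):
    return [self._names_by_uuid[uuid] for uuid in inst_uuids]

cfg = FakeConfig({"uuid-1": "web1", "uuid-2": "db1"},
                 {"group-A": ["uuid-1", "uuid-2"]})
# Locks are named after instances, so the UUIDs from the group listing are
# translated before being stored in needed_locks[locking.LEVEL_INSTANCE]:
lock_names = cfg.GetInstanceNames(cfg.GetNodeGroupInstances("group-A"))
assert lock_names == ["web1", "db1"]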
@@ -427,10 +430,10 @@
     """Check prerequisites.
 
     """
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
 
     # Check if locked instances are still correct
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
 
     self.group = self.cfg.GetNodeGroup(self.group_uuid)
     cluster = self.cfg.GetClusterInfo()
@@ -477,8 +480,7 @@
                                            group_policy=True)
 
       new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
-      inst_filter = lambda inst: inst.name in owned_instances
-      instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
+      instances = self.cfg.GetMultiInstanceInfoByName(owned_instance_names)
       gmi = ganeti.masterd.instance
       violations = \
           ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
@@ -709,7 +711,8 @@
       # Lock instances optimistically, needs verification once node and group
       # locks have been acquired
       self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+        self.cfg.GetInstanceNames(
+          self.cfg.GetNodeGroupInstances(self.group_uuid))
 
     elif level == locking.LEVEL_NODEGROUP:
       assert not self.needed_locks[locking.LEVEL_NODEGROUP]
@@ -723,7 +726,9 @@
                            for instance_name in
                              self.owned_locks(locking.LEVEL_INSTANCE)
                            for group_uuid in
-                             self.cfg.GetInstanceNodeGroups(instance_name))
+                             self.cfg.GetInstanceNodeGroups(
+                               self.cfg.GetInstanceInfoByName(instance_name)
+                               .uuid))
       else:
         # No target groups, need to lock all of them
         lock_groups = locking.ALL_SET
@@ -746,7 +751,7 @@
       self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
@@ -754,10 +759,11 @@
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)
 
     # Get instance information
-    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
+    self.instances = \
+      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))
 
     # Check if node groups for locked instances are still correct
     CheckInstancesNodeGroups(self.cfg, self.instances,
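The CheckPrereq hunks apply the reverse mapping: the names held as instance locks are resolved back into configuration objects through the ...ByName helpers instead of the old name-keyed GetMultiInstanceInfo(). A small sketch with an assumed stub follows; the real helper's return key (name vs. UUID) is not visible in this diff, so the stub keys by name purely for illustration.

# Assumed stub for illustration; not Ganeti's real config API.
class FakeInstance:
  def __init__(self, uuid, name):
    self.uuid = uuid
    self.name = name

class FakeConfig:
  def __init__(self, instances):
    self._by_name = dict((inst.name, inst) for inst in instances)

  def GetMultiInstanceInfoByName(self, names):
    # Returns key/instance pairs so the caller can build a dict, mirroring
    # dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names)) above.
    return [(name, self._by_name[name]) for name in names]

cfg = FakeConfig([FakeInstance("uuid-1", "web1"), FakeInstance("uuid-2", "db1")])
owned_instance_names = frozenset(["web1", "db1"])
instances = dict(cfg.GetMultiInstanceInfoByName(owned_instance_names))
assert set(instances) == owned_instance_names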
@@ -797,11 +803,11 @@
     return (run_nodes, run_nodes)
 
   def Exec(self, feedback_fn):
-    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
+    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))
 
     assert self.group_uuid not in self.target_uuids
 
-    req = iallocator.IAReqGroupChange(instances=instances,
+    req = iallocator.IAReqGroupChange(instances=inst_names,
                                       target_groups=self.target_uuids)
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
@@ -851,7 +857,8 @@
       # Lock instances optimistically, needs verification once node and group
       # locks have been acquired
       self.needed_locks[locking.LEVEL_INSTANCE] = \
-        self.cfg.GetNodeGroupInstances(self.group_uuid)
+        self.cfg.GetInstanceNames(
+          self.cfg.GetNodeGroupInstances(self.group_uuid))
 
     elif level == locking.LEVEL_NODEGROUP:
       assert not self.needed_locks[locking.LEVEL_NODEGROUP]
@@ -863,7 +870,9 @@
             # later on
             [group_uuid
              for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
+             for group_uuid in
+               self.cfg.GetInstanceNodeGroups(
+                 self.cfg.GetInstanceInfoByName(instance_name).uuid)])
 
     elif level == locking.LEVEL_NODE:
       # This will only lock the nodes in the group to be verified which contain
@@ -877,17 +886,17 @@
       self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
 
   def CheckPrereq(self):
-    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
+    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
     owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert self.group_uuid in owned_groups
 
     # Check if locked instances are still correct
-    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)
+    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)
 
     # Get instance information
-    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))
+    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))
 
     # Check if node groups for locked instances are still correct
     CheckInstancesNodeGroups(self.cfg, self.instances,