Revision 1c3231aa lib/cmdlib/group.py
--- a/lib/cmdlib/group.py
+++ b/lib/cmdlib/group.py
@@ -152,14 +152,14 @@
   def ExpandNames(self):
     # These raise errors.OpPrereqError on their own:
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
-    self.op.nodes = GetWantedNodes(self, self.op.nodes)
+    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)
 
     # We want to lock all the affected nodes and groups. We have readily
     # available the list of nodes, and the *destination* group. To gather the
     # list of "source" groups, we need to fetch node information later on.
     self.needed_locks = {
       locking.LEVEL_NODEGROUP: set([self.group_uuid]),
-      locking.LEVEL_NODE: self.op.nodes,
+      locking.LEVEL_NODE: self.op.node_uuids,
       }
 
   def DeclareLocks(self, level):
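The hunk above sets the pattern the rest of the revision follows: GetWantedNodes now hands back both the resolved node UUIDs and the canonical names, and the node-level locks are keyed on the UUIDs rather than the names. A minimal, self-contained sketch of that shape (toy data and helper names, not the Ganeti API):

# Hypothetical stand-ins: a name->uuid table and a toy get_wanted_nodes()
# that mirrors the (uuids, names) tuple shape used after this change.
NODES_BY_NAME = {
  "node1.example.com": "uuid-aaa",
  "node2.example.com": "uuid-bbb",
}

def get_wanted_nodes(names):
  uuids = [NODES_BY_NAME[name] for name in names]
  return uuids, list(names)

node_uuids, node_names = get_wanted_nodes(["node1.example.com", "node2.example.com"])
needed_locks = {
  "NODEGROUP": {"group-uuid-1"},  # destination group, already a UUID
  "NODE": node_uuids,             # nodes are now locked by UUID, not by name
}
print(needed_locks["NODE"])       # ['uuid-aaa', 'uuid-bbb']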
@@ -168,7 +168,7 @@
 
     # Try to get all affected nodes' groups without having the group or node
     # lock yet. Needs verification later in the code flow.
-    groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)
+    groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)
 
     self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
 
@@ -178,10 +178,10 @@
     """
     assert self.needed_locks[locking.LEVEL_NODEGROUP]
     assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
-            frozenset(self.op.nodes))
+            frozenset(self.op.node_uuids))
 
     expected_locks = (set([self.group_uuid]) |
-                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
+                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
     actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
     if actual_locks != expected_locks:
       raise errors.OpExecError("Nodes changed groups since locks were acquired,"
@@ -198,8 +198,8 @@
                              (self.op.group_name, self.group_uuid))
 
     (new_splits, previous_splits) = \
-      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
-                                             for node in self.op.nodes],
+      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
+                                             for uuid in self.op.node_uuids],
                                             self.node_data, instance_data)
 
     if new_splits:
@@ -222,7 +222,7 @@
     """Assign nodes to a new group.
 
     """
-    mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]
+    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]
 
     self.cfg.AssignGroupNodes(mods)
 
@@ -240,7 +240,7 @@
     Only instances whose disk template is listed in constants.DTS_INT_MIRROR are
     considered.
 
-    @type changes: list of (node_name, new_group_uuid) pairs.
+    @type changes: list of (node_uuid, new_group_uuid) pairs.
     @param changes: list of node assignments to consider.
     @param node_data: a dict with data for all nodes
     @param instance_data: a dict with all instances to consider
@@ -250,26 +250,22 @@
       split and this change does not fix.
 
     """
-    changed_nodes = dict((node, group) for node, group in changes
-                         if node_data[node].group != group)
+    changed_nodes = dict((uuid, group) for uuid, group in changes
+                         if node_data[uuid].group != group)
 
     all_split_instances = set()
     previously_split_instances = set()
 
-    def InstanceNodes(instance):
-      return [instance.primary_node] + list(instance.secondary_nodes)
-
     for inst in instance_data.values():
       if inst.disk_template not in constants.DTS_INT_MIRROR:
         continue
 
-      instance_nodes = InstanceNodes(inst)
-
-      if len(set(node_data[node].group for node in instance_nodes)) > 1:
+      if len(set(node_data[node_uuid].group
+                 for node_uuid in inst.all_nodes)) > 1:
         previously_split_instances.add(inst.name)
 
-      if len(set(changed_nodes.get(node, node_data[node].group)
-                 for node in instance_nodes)) > 1:
+      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
+                 for node_uuid in inst.all_nodes)) > 1:
         all_split_instances.add(inst.name)
 
     return (list(all_split_instances - previously_split_instances),
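The hunk above drops the local InstanceNodes() helper in favour of inst.all_nodes and keys everything on node UUIDs; the detection logic itself is unchanged: an instance counts as previously split if its nodes already span more than one group, and as newly split if they would span more than one group once the proposed reassignments are applied. A self-contained sketch of that computation with plain dicts (hypothetical data, not Ganeti objects):

# node_groups: node UUID -> current group; instances: name -> node UUIDs
# (primary + secondaries); changes: proposed (node_uuid, new_group_uuid) pairs.
def check_split(changes, node_groups, instances):
  changed = {uuid: grp for uuid, grp in changes if node_groups[uuid] != grp}
  new_splits, previous_splits = set(), set()
  for name, nodes in instances.items():
    if len({node_groups[n] for n in nodes}) > 1:
      previous_splits.add(name)
    if len({changed.get(n, node_groups[n]) for n in nodes}) > 1:
      new_splits.add(name)
  return sorted(new_splits - previous_splits), sorted(new_splits & previous_splits)

node_groups = {"n1": "g1", "n2": "g1", "n3": "g2"}
instances = {"inst-a": ["n1", "n2"], "inst-b": ["n1", "n3"]}
print(check_split([("n2", "g2")], node_groups, instances))
# (['inst-a'], ['inst-b']): inst-a would become split, inst-b already is.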
@@ -333,8 +329,8 @@
 
     for node in all_nodes.values():
       if node.group in group_to_nodes:
-        group_to_nodes[node.group].append(node.name)
-      node_to_group[node.name] = node.group
+        group_to_nodes[node.group].append(node.uuid)
+      node_to_group[node.uuid] = node.group
 
     if do_instances:
       all_instances = lu.cfg.GetAllInstancesInfo()
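The query helper keeps building the same two lookup tables, only filled with node UUIDs instead of names. A trivial standalone illustration (namedtuple stand-ins for the node objects):

from collections import namedtuple

# Stand-in node objects with just the attributes the loop touches.
Node = namedtuple("Node", ["uuid", "group"])
all_nodes = {"u1": Node("u1", "g1"), "u2": Node("u2", "g2"), "u3": Node("u3", "g1")}
group_to_nodes = {"g1": [], "g2": []}
node_to_group = {}

for node in all_nodes.values():
  if node.group in group_to_nodes:
    group_to_nodes[node.group].append(node.uuid)
  node_to_group[node.uuid] = node.group

print(group_to_nodes)  # {'g1': ['u1', 'u3'], 'g2': ['u2']}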
@@ -561,7 +557,7 @@
 
     """
     # Verify that the group is empty.
-    group_nodes = [node.name
+    group_nodes = [node.uuid
                    for node in self.cfg.GetAllNodesInfo().values()
                    if node.group == self.group_uuid]
 
@@ -654,7 +650,7 @@
     all_nodes.pop(mn, None)
 
     run_nodes = [mn]
-    run_nodes.extend(node.name for node in all_nodes.values()
+    run_nodes.extend(node.uuid for node in all_nodes.values()
                      if node.group == self.group_uuid)
 
     return (run_nodes, run_nodes)
@@ -743,15 +739,16 @@
       # Lock all nodes in group to be evacuated and target groups
       owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
       assert self.group_uuid in owned_groups
-      member_nodes = [node_name
-                      for group in owned_groups
-                      for node_name in self.cfg.GetNodeGroup(group).members]
-      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+      member_node_uuids = [node_uuid
+                           for group in owned_groups
+                           for node_uuid in
+                           self.cfg.GetNodeGroup(group).members]
+      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
 
   def CheckPrereq(self):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert owned_groups.issuperset(self.req_target_uuids)
     assert self.group_uuid in owned_groups
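The evacuation LU now extends the node locks with the member lists of every owned group; the rename to member_node_uuids reflects that, after this series, those member lists hold node UUIDs. A small sketch of the flattening comprehension with toy data:

# Toy mapping: group uuid -> member node UUIDs.
groups = {
  "grp-src": ["uuid-aaa", "uuid-bbb"],
  "grp-dst": ["uuid-ccc"],
}
owned_groups = frozenset(groups)

member_node_uuids = [node_uuid
                     for group in owned_groups
                     for node_uuid in groups[group]]
print(sorted(member_node_uuids))  # ['uuid-aaa', 'uuid-bbb', 'uuid-ccc']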
@@ -764,7 +761,7 @@
 
     # Check if node groups for locked instances are still correct
     CheckInstancesNodeGroups(self.cfg, self.instances,
-                             owned_groups, owned_nodes, self.group_uuid)
+                             owned_groups, owned_node_uuids, self.group_uuid)
 
     if self.req_target_uuids:
       # User requested specific target groups
@@ -876,13 +873,13 @@
 
       # Lock all nodes in group to be verified
       assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
-      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
-      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+      member_node_uuids = self.cfg.GetNodeGroup(self.group_uuid).members
+      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)
 
   def CheckPrereq(self):
     owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
     owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
-    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
+    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
 
     assert self.group_uuid in owned_groups
 
@@ -894,7 +891,7 @@
 
     # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
-                             owned_groups, owned_nodes, self.group_uuid)
+                             owned_groups, owned_node_uuids, self.group_uuid)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -913,23 +910,24 @@
       [inst for inst in self.instances.values() if inst.disks_active])
 
     if nv_dict:
-      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
-                             set(self.cfg.GetVmCapableNodeList()))
+      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
+                                  set(self.cfg.GetVmCapableNodeList()))
 
-      node_lvs = self.rpc.call_lv_list(nodes, [])
+      node_lvs = self.rpc.call_lv_list(node_uuids, [])
 
-      for (node, node_res) in node_lvs.items():
+      for (node_uuid, node_res) in node_lvs.items():
         if node_res.offline:
           continue
 
         msg = node_res.fail_msg
         if msg:
-          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
-          res_nodes[node] = msg
+          logging.warning("Error enumerating LVs on node %s: %s",
+                          self.cfg.GetNodeName(node_uuid), msg)
+          res_nodes[node_uuid] = msg
          continue
 
        for lv_name, (_, _, lv_online) in node_res.payload.items():
-          inst = nv_dict.pop((node, lv_name), None)
+          inst = nv_dict.pop((node_uuid, lv_name), None)
          if not (lv_online or inst is None):
            res_instances.add(inst)
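In this last hunk the (node, LV) bookkeeping switches to UUID keys end to end: nv_dict and the call_lv_list results are indexed by node UUID, and the UUID is translated back to a hostname only for the warning message. A self-contained sketch of the matching loop, with plain dicts standing in for the RPC payload (the three-tuple is abbreviated to two ignored fields plus the online flag; all data is hypothetical):

import logging

# nv_dict: (node_uuid, lv_name) -> instance name; node_lvs: per-node payloads
# of lv_name -> (ignored, ignored, lv_online).
nv_dict = {("u1", "xenvg/disk0"): "inst-a", ("u2", "xenvg/disk1"): "inst-b"}
node_lvs = {
  "u1": {"xenvg/disk0": (1024, None, False)},  # LV present but not online
  "u2": {},                                    # LV never reported back
}
node_names = {"u1": "node1.example.com", "u2": "node2.example.com"}

res_instances = set()
for node_uuid, payload in node_lvs.items():
  for lv_name, (_, _, lv_online) in payload.items():
    inst = nv_dict.pop((node_uuid, lv_name), None)
    if not (lv_online or inst is None):
      res_instances.add(inst)

# Whatever survived in nv_dict was never reported back; log it by node *name*.
for (node_uuid, lv_name), inst in nv_dict.items():
  logging.warning("LV %s of instance %s missing on %s",
                  lv_name, inst, node_names[node_uuid])
print(res_instances)  # {'inst-a'}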