# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Logical units dealing with node groups."""
import logging

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, _QueryBase, \
  ResultWithJobs
from ganeti.cmdlib.common import _MergeAndVerifyHvState, \
  _MergeAndVerifyDiskState, _GetWantedNodes, _GetUpdatedParams, \
  _CheckNodeGroupInstances, _GetUpdatedIPolicy, \
  _ComputeNewInstanceViolations, _GetDefaultIAllocator, _ShareAll, \
  _CheckInstancesNodeGroups, _LoadNodeEvacResult, _MapInstanceDisksToNodes

import ganeti.masterd.instance
class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP

  def ExpandNames(self):
    # We need the new group's UUID here so that we can create and acquire the
    # corresponding lock. Later, in Exec(), we'll indicate to cfg.AddNodeGroup
    # that it should not check whether the UUID exists in the configuration.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name is not an existing node group
    already.

    """
    try:
      existing_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                                 " node group (UUID: %s)" %
                                 (self.op.group_name, existing_uuid),
                                 errors.ECODE_EXISTS)
    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)

    if self.op.hv_state:
      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state, None)
    else:
      self.new_hv_state = None

    if self.op.disk_state:
      self.new_disk_state = _MergeAndVerifyDiskState(self.op.disk_state, None)
    else:
      self.new_disk_state = None

    if self.op.diskparams:
      for templ in constants.DISK_TEMPLATES:
        if templ in self.op.diskparams:
          utils.ForceDictType(self.op.diskparams[templ],
                              constants.DISK_DT_TYPES)
      self.new_diskparams = self.op.diskparams
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s"
                                   % err, errors.ECODE_INVAL)
    else:
      self.new_diskparams = {}
    if self.op.ipolicy:
      cluster = self.cfg.GetClusterInfo()
      full_ipolicy = cluster.SimpleFillIPolicy(self.op.ipolicy)
      try:
        objects.InstancePolicy.CheckParameterSyntax(full_ipolicy, False)
      except errors.ConfigurationError, err:
        raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                                   errors.ECODE_INVAL)
  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])
  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams,
                                  diskparams=self.new_diskparams,
                                  ipolicy=self.op.ipolicy,
                                  hv_state_static=self.new_hv_state,
                                  disk_state_static=self.new_disk_state)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]
class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.op.nodes = _GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups. We have readily
    # available the list of nodes, and the *destination* group. To gather the
    # list of "source" groups, we need to fetch node information later on.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.nodes,
      }
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet. Needs verification later in the code flow.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.nodes)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)
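      # CheckPrereq recomputes the affected nodes' groups once all locks are
      # held and aborts if they changed in the meantime, so this optimistic
      # lookup is safe.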
  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.nodes))

    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.nodes))
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))
    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(node, self.group_uuid)
                                             for node in self.op.nodes],
                                            self.node_data, instance_data)
    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(new_splits))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

    if previous_splits:
      self.LogWarning("In addition, these already-split instances continue"
                      " to be split across groups: %s",
                      utils.CommaJoin(utils.NiceSort(previous_splits)))
  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    mods = [(node_name, self.group_uuid) for node_name in self.op.nodes]

    self.cfg.AssignGroupNodes(mods)
  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    about instances that were already split and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR
    are considered.

    @type changes: list of (node_name, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider

    @return: a list of instances that were previously okay and become split as
      a consequence of this change, and a list of instances that were
      previously split and that this change does not fix.

    """
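    # Illustrative example (hypothetical data): for a DRBD instance with its
    # primary on node "n1" and secondary on "n2", both currently in group
    # "g1", changes=[("n2", "g2")] would report the instance as newly split;
    # if "n2" had already been in "g2" before the change, the instance would
    # instead be reported as previously (and still) split.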
    changed_nodes = dict((node, group) for node, group in changes
                         if node_data[node].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    def InstanceNodes(instance):
      return [instance.primary_node] + list(instance.secondary_nodes)

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      instance_nodes = InstanceNodes(inst)

      if len(set(node_data[node].group for node in instance_nodes)) > 1:
        previously_split_instances.add(inst.name)

      if len(set(changed_nodes.get(node, node_data[node].group)
                 for node in instance_nodes)) > 1:
        all_split_instances.add(inst.name)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))
class _GroupQuery(_QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    self._cluster = lu.cfg.GetClusterInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass
  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE, we need to map group->[nodes], and group->[instances] for
    # GQ_INST. The former is attainable with just GetAllNodesInfo(), but for
    # the latter GetAllInstancesInfo() is not enough, as we have to go through
    # instance->node. Hence, we will need to process nodes even if we only
    # need instance information.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.name)
          node_to_group[node.name] = node.group
      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.name)

        if not do_nodes:
          # Do not pass on node information if it was not requested.
          group_to_nodes = None

    return query.GroupQueryData(self._cluster,
                                [self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances,
                                query.GQ_DISKPARAMS in self.requested_data)
class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """

  def CheckArguments(self):
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                          self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.gq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)
class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.diskparams,
      self.op.alloc_policy,
      self.op.hv_state,
      self.op.disk_state,
      self.op.ipolicy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)
  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

    self.share_locks[locking.LEVEL_INSTANCE] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically; this needs verification once the group
      # lock has been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)
  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
    """Updates and verifies disk parameters.

    """
    new_params = _GetUpdatedParams(old, new)
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
    return new_params
  def CheckPrereq(self):
    """Check prerequisites.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))

    # Check if locked instances are still correct
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)

    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    cluster = self.cfg.GetClusterInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = _GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams
    if self.op.diskparams:
      diskparams = self.group.diskparams
      uavdp = self._UpdateAndVerifyDiskParams
      # For each disk template, update and verify the values of its subdict
      new_diskparams = dict((dt,
                             uavdp(diskparams.get(dt, {}),
                                   self.op.diskparams[dt]))
                            for dt in constants.DISK_TEMPLATES
                            if dt in self.op.diskparams)
      # Now that all subdicts of diskparams are ready, merge the actual
      # dict with all updated subdicts
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
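      # Note: objects.FillDict(diskparams, new_diskparams) is expected to
      # return a copy of the existing group diskparams updated with the new
      # per-template subdicts, so templates that were not modified keep their
      # previous values.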
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s"
                                   % err, errors.ECODE_INVAL)
    if self.op.hv_state:
      self.new_hv_state = _MergeAndVerifyHvState(self.op.hv_state,
                                                 self.group.hv_state_static)

    if self.op.disk_state:
      self.new_disk_state = \
        _MergeAndVerifyDiskState(self.op.disk_state,
                                 self.group.disk_state_static)
    if self.op.ipolicy:
      self.new_ipolicy = _GetUpdatedIPolicy(self.group.ipolicy,
                                            self.op.ipolicy,
                                            group_policy=True)

      new_ipolicy = cluster.SimpleFillIPolicy(self.new_ipolicy)
      inst_filter = lambda inst: inst.name in owned_instances
      instances = self.cfg.GetInstancesInfoByFilter(inst_filter).values()
      gmi = ganeti.masterd.instance
      violations = \
        _ComputeNewInstanceViolations(gmi.CalculateGroupIPolicy(cluster,
                                                                self.group),
                                      new_ipolicy, instances, self.cfg)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
                        " violate it: %s",
                        utils.CommaJoin(violations))
  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])
  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.diskparams:
      self.group.diskparams = self.new_diskparams
      result.append(("diskparams", str(self.group.diskparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    if self.op.hv_state:
      self.group.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      self.group.disk_state_static = self.new_disk_state

    if self.op.ipolicy:
      self.group.ipolicy = self.new_ipolicy

    self.cfg.Update(self.group, feedback_fn)
    return result
class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP

  def ExpandNames(self):
    # This will raise errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.name
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left group-less.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
                                 " removed" % self.op.group_name,
                                 errors.ECODE_STATE)
  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])
  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid
class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }
  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)
  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    all_nodes = self.cfg.GetAllNodesInfo()
    all_nodes.pop(mn, None)

    run_nodes = [mn]
    run_nodes.extend(node.name for node in all_nodes.values()
                     if node.group == self.group_uuid)

    return (run_nodes, run_nodes)
  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name
class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.op.group_name,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)
    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }
  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(instance_name))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()
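      # Note: LOCKS_APPEND is assumed to make the node lock recalculation
      # extend the list declared for this level instead of replacing it; the
      # member nodes of all owned groups are then added explicitly below.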
      # Lock all nodes in group to be evacuated and target groups
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_nodes = [node_name
                      for group in owned_groups
                      for node_name in self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))

    # Check if node groups for locked instances are still correct
    _CheckInstancesNodeGroups(self.cfg, self.instances,
                              owned_groups, owned_nodes, self.group_uuid)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)
  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)
  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=self.target_uuids)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
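    # _LoadNodeEvacResult is assumed to turn the iallocator result into a list
    # of job definitions; the jobs themselves are submitted by the caller via
    # the ResultWithJobs value returned below.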
    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)
class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """

  def ExpandNames(self):
    # Raises errors.OpPrereqError on its own if group can't be found
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],

      # This opcode acquires all node locks in a group. LUClusterVerifyDisks
      # starts one instance of this opcode for every group, which means all
      # nodes will be locked for a short amount of time, so it's better to
      # acquire the node allocation lock as well.
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }
  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        set([self.group_uuid] +
            # Lock all groups used by instances optimistically; this requires
            # going via the node before it's locked, requiring verification
            # later on
            [group_uuid
             for instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
             for group_uuid in self.cfg.GetInstanceNodeGroups(instance_name)])
    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be verified which contain
      # actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be verified
      assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)
      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
  def CheckPrereq(self):
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    _CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instances)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfo(owned_instances))

    # Check if node groups for locked instances are still correct
    _CheckInstancesNodeGroups(self.cfg, self.instances,
                              owned_groups, owned_nodes, self.group_uuid)
  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    res_nodes = {}
    res_instances = set()
    res_missing = {}
    nv_dict = _MapInstanceDisksToNodes(
      [inst for inst in self.instances.values()
       if inst.admin_state == constants.ADMINST_UP])
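    # nv_dict is expected to map (node_name, volume_name) pairs to the owning
    # instance; entries are popped below as volumes are found, so whatever is
    # left over corresponds to missing volumes.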
    if nv_dict:
      nodes = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                             set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(nodes, [])
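      # Each per-node result is assumed to carry a payload mapping LV names to
      # tuples whose last element tells whether the LV is online; only that
      # flag is used below.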
      for (node, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
          res_nodes[node] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = nv_dict.pop((node, lv_name), None)
          if not (lv_online or inst is None):
            res_instances.add(inst)
      # Any leftover items in nv_dict are missing LVs; arrange the data better
      # for the caller
      for key, inst in nv_dict.iteritems():
        res_missing.setdefault(inst, []).append(list(key))

    return (res_nodes, list(res_instances), res_missing)