    return self.op.new_name


class LUGroupEvacuate(LogicalUnit):
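  """Evacuate instances off a node group.

  The evacuation plan is computed by an iallocator; the actual instance
  moves are performed by the jobs this LU returns from Exec.

  """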
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    if not self.op.iallocator:
      # Use default iallocator
      self.op.iallocator = self.cfg.GetDefaultIAllocator()

    if not self.op.iallocator:
      raise errors.OpPrereqError("No iallocator was specified, neither in the"
                                 " opcode nor as a cluster-wide default",
                                 errors.ECODE_INVAL)

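    # All lock levels are acquired in shared mode: this LU only computes an
    # evacuation plan, the actual instance moves happen in the jobs returned
    # from Exec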
    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically, needs verification once node and group
      # locks have been acquired
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetNodeGroupInstances(self.group_uuid)

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lock_groups.update(group_uuid
                           for instance_name in
                             self.glm.list_owned(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(instance_name))
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in group to be evacuated
      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)

  def CheckPrereq(self):
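    """Check prerequisites.

    Verifies that the instances and node groups seen while acquiring the
    optimistic locks are still current.

    """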
    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    wanted_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
    if owned_instances != wanted_instances:
      raise errors.OpPrereqError("Instances in node group to be evacuated (%s)"
                                 " changed since locks were acquired, wanted"
                                 " %s, have %s; retry the operation" %
                                 (self.group_uuid,
                                  utils.CommaJoin(wanted_instances),
                                  utils.CommaJoin(owned_instances)))

    # Get instance information
    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
                          for name in owned_instances)

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instances:
      inst = self.instances[instance_name]
      assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
      assert owned_nodes.issuperset(inst.all_nodes), \
        "Instance %s's nodes changed while we kept the lock" % instance_name

      inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
      if not owned_groups.issuperset(inst_groups):
        raise errors.OpPrereqError("Instance's node groups changed since locks"
                                   " were acquired, current groups are '%s',"
                                   " owning groups '%s'; retry the operation" %
                                   (utils.CommaJoin(inst_groups),
                                    utils.CommaJoin(owned_groups)))

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpExecError("There are no possible target groups")

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

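    # Ask the iallocator, in "change group" mode, for a plan moving all owned
    # instances into the target groups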
    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
                     instances=instances, target_groups=self.target_uuids)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)


class TagsLU(NoHooksLU): # pylint: disable-msg=W0223
  """Generic tags LU.