11137 |
11137 |
}
|
11138 |
11138 |
|
11139 |
11139 |
|
|
11140 |
class LUInstanceChangeGroup(LogicalUnit):
  """Move an instance to different node groups via the instance allocator.

  Uses the configured iallocator (mode ``CHG_GROUP``) to compute the jobs
  required to move the locked instance into one of the requested target
  groups; the jobs are returned for submission rather than executed here.

  """
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    """Expand names and declare initial locks.

    Resolves the requested target group names to UUIDs (or leaves
    C{req_target_uuids} as C{None} meaning "all groups") and picks the
    default iallocator if none was given in the opcode.

    """
    # All locks are taken in shared mode; group/node lock lists are filled
    # in incrementally by DeclareLocks
    self.share_locks = _ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      # List comprehension (not map()) so the result is a real list that can
      # be truth-tested and iterated multiple times (DeclareLocks,
      # CheckPrereq) under both Python 2 and Python 3
      self.req_target_uuids = [self.cfg.LookupNodeGroup(g)
                               for g in self.op.target_groups]
    else:
      # No explicit targets: every group is a potential destination
      self.req_target_uuids = None

    self.op.iallocator = _GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    """Declare group and node locks for the given locking level.

    """
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups (groups already owned
        # minus those the instance currently uses)
        lock_groups = (frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP)) -
                       self.cfg.GetInstanceNodeGroups(self.op.instance_name))
        member_nodes = [node_name
                        for group in lock_groups
                        for node_name in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    Verifies the optimistically-taken locks still match the configuration,
    loads the instance, and computes C{self.target_uuids} — the set of
    candidate destination groups, which must be non-empty and disjoint from
    the groups the instance currently uses.

    @raise errors.OpPrereqError: if a requested target group is already used
        by the instance, or no possible target group remains

    """
    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instances == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)

    # Check if node groups for locked instance are still correct
    assert owned_nodes.issuperset(self.instance.all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = _CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
                                           owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups; wrap in frozenset so the
      # set operations below work (a plain list would make
      # "self.target_uuids & inst_groups" raise TypeError)
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(_BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Run the iallocator and return the resulting jobs.

    @return: a L{ResultWithJobs} wrapping the jobs computed by the
        iallocator for moving the instance

    """
    instances = list(self.glm.list_owned(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    ial = IAllocator(self.cfg, self.rpc, constants.IALLOCATOR_MODE_CHG_GROUP,
                     instances=instances, target_groups=list(self.target_uuids))

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info),
                                 errors.ECODE_NORES)

    jobs = _LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
11140 |
11281 |
class LUBackupQuery(NoHooksLU):
|
11141 |
11282 |
"""Query the exports list
|
11142 |
11283 |
|