7322 |
7322 |
return env, nl, nl
|
7323 |
7323 |
|
7324 |
7324 |
|
7325 |
|
class LUEvacuateNode(LogicalUnit):
  """Relocate the secondary instances from a node.

  """
  HPATH = "node-evacuate"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  _OP_DEFS = [
    ("remote_node", None),
    ("iallocator", None),
    ("early_release", False),
    ]
  REQ_BGL = False

  def CheckArguments(self):
    """Validate the opcode parameters.

    Delegates to the disk-replacement tasklet, which knows which
    combinations of remote node and iallocator are acceptable.

    """
    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
                                  self.op.remote_node,
                                  self.op.iallocator)

  def ExpandNames(self):
    """Compute the required locks and the per-instance tasklets.

    """
    self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)

    self.needed_locks = {}

    # Decide which node locks are needed; an iallocator request takes
    # precedence over an explicitly given remote node
    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
    elif self.op.remote_node is not None:
      self.op.remote_node = _ExpandNodeName(self.cfg, self.op.remote_node)

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [self.op.remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
    else:
      raise errors.OpPrereqError("Invalid parameters", errors.ECODE_INVAL)

    # Build one disk-replacement tasklet for every instance that has a
    # secondary on the evacuated node
    self.instance_names = []
    self.tasklets = []

    for instance in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
      logging.debug("Replacing disks for instance %s", instance.name)
      self.instance_names.append(instance.name)
      self.tasklets.append(TLReplaceDisks(self, instance.name,
                                          constants.REPLACE_DISK_CHG,
                                          self.op.iallocator,
                                          self.op.remote_node, [],
                                          True, self.op.early_release))

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names

  def DeclareLocks(self, level):
    """Declare additional locks for the given locking level.

    """
    # Only the node level needs extra work, and only when we are not
    # already holding all node locks: then the instances'
    # primary/secondary nodes still have to be declared.
    if level != locking.LEVEL_NODE:
      return
    if self.needed_locks[locking.LEVEL_NODE] is locking.ALL_SET:
      return
    self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, the primary and all the secondaries.

    """
    hook_env = {
      "NODE_NAME": self.op.node_name,
      }

    node_list = [self.cfg.GetMasterNode()]

    remote = self.op.remote_node
    if remote is not None:
      hook_env["NEW_SECONDARY"] = remote
      node_list.append(remote)

    return (hook_env, node_list, node_list)
|
7410 |
|
|
7411 |
|
|
7412 |
7325 |
class TLReplaceDisks(Tasklet):
|
7413 |
7326 |
"""Replaces disks for an instance.
|
7414 |
7327 |
|