    return env, nl, nl


class LUEvacuateNode(LogicalUnit):
  """Relocate the secondary instances from a node.

  """
  HPATH = "node-evacuate"
  HTYPE = constants.HTYPE_NODE
  _OP_REQP = ["node_name"]
  REQ_BGL = False

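  # Both opcode fields handled below are optional, so they are defaulted
  # to None before being handed to the validation logic shared with
  # TLReplaceDisks.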
  def CheckArguments(self):
    if not hasattr(self.op, "remote_node"):
      self.op.remote_node = None
    if not hasattr(self.op, "iallocator"):
      self.op.iallocator = None

    TLReplaceDisks.CheckArguments(constants.REPLACE_DISK_CHG,
                                  self.op.remote_node,
                                  self.op.iallocator)

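  # With REQ_BGL = False this LU does not take the big Ganeti lock, so
  # ExpandNames has to declare every node and instance lock it needs.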
  def ExpandNames(self):
    node_name = self.cfg.ExpandNodeName(self.op.node_name)
    if node_name is None:
      # Report the name as given, not the (None) result of the expansion
      raise errors.OpPrereqError("Node '%s' not known" % self.op.node_name)
    self.op.node_name = node_name

    self.needed_locks = {}

    # Declare node locks
    if self.op.iallocator is not None:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    elif self.op.remote_node is not None:
      remote_node = self.cfg.ExpandNodeName(self.op.remote_node)
      if remote_node is None:
        raise errors.OpPrereqError("Node '%s' not known" %
                                   self.op.remote_node)

      self.op.remote_node = remote_node

      # Warning: do not remove the locking of the new secondary here
      # unless DRBD8.AddChildren is changed to work in parallel;
      # currently it doesn't since parallel invocations of
      # FindUnusedMinor will conflict
      self.needed_locks[locking.LEVEL_NODE] = [remote_node]
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

    else:
      raise errors.OpPrereqError("Invalid parameters")

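    # When an iallocator is used, the replacement secondary for each
    # instance is only known once the allocator has run, hence the
    # conservative ALL_SET node locking in the first branch above.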
    # Create tasklets for replacing disks for all secondary instances on this
    # node
    names = []

    for inst in _GetNodeSecondaryInstances(self.cfg, self.op.node_name):
      logging.debug("Replacing disks for instance %s", inst.name)
      names.append(inst.name)

      replacer = TLReplaceDisks(self, inst.name, constants.REPLACE_DISK_CHG,
                                self.op.iallocator, self.op.remote_node, [])
      self.tasklets.append(replacer)

    self.instance_names = names

    # Declare instance locks
    self.needed_locks[locking.LEVEL_INSTANCE] = self.instance_names

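  # On the remote_node path, the LOCKS_APPEND mode set above makes
  # _LockInstancesNodes() (called from DeclareLocks below) add the
  # instances' own nodes to the lock list rather than replace it.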
  def DeclareLocks(self, level):
    # If we're not already locking all nodes in the set we have to declare the
    # instance's primary/secondary nodes.
    if (level == locking.LEVEL_NODE and
        self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET):
      self._LockInstancesNodes()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master node and, if given, on the new secondary node.

    """
    env = {
      "NODE_NAME": self.op.node_name,
      }

    nl = [self.cfg.GetMasterNode()]

    if self.op.remote_node is not None:
      env["NEW_SECONDARY"] = self.op.remote_node
      nl.append(self.op.remote_node)

    return (env, nl, nl)


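# Illustrative sketch only: assuming Ganeti's usual opcode/LU pairing, a
# client would trigger LUEvacuateNode with an evacuate opcode naming the
# node and exactly one of iallocator/remote_node, e.g. roughly:
#
#   op = opcodes.OpEvacuateNode(node_name="node3.example.com",
#                               iallocator="hail")
#   job_id = cli.SendJob([op])
#
# The field names mirror _OP_REQP and the hasattr() defaults above; the
# opcode class and submission call are assumptions, not taken from this
# file.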
class TLReplaceDisks(Tasklet):
  """Replaces disks for an instance.

...
    This dispatches the disk replacement to the appropriate handler.

    """
    feedback_fn("Replacing disks for %s" % self.instance.name)

    activate_disks = (not self.instance.admin_up)

    # Activate the instance disks if we're replacing them on a down instance
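    # (admin_up being False means the instance is configured down, so its
    # disks are normally inactive and must be brought up before they can
    # be replaced)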