    return env, nl, nl


class LUMoveInstance(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  _OP_REQP = ["instance_name", "target_node"]
  REQ_BGL = False

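  # REQ_BGL = False: this LU implements fine-grained locking through
  # ExpandNames/DeclareLocks instead of taking the big Ganeti lock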
  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = self.cfg.ExpandNodeName(self.op.target_node)
    if target_node is None:
      raise errors.OpPrereqError("Node '%s' not known" %
                                 self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
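    # the target node was already locked in ExpandNames; here we only
    # append the instance's primary node to the node-level locks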
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      }
    env.update(_BuildInstanceHookEnvByObject(self, self.instance))
    nl = [self.cfg.GetMasterNode()] + [self.instance.primary_node,
                                       self.op.target_node]
    return env, nl, nl

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node))

    bep = self.cfg.GetClusterInfo().FillBE(instance)

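    # only plain LVM and file-based disks can be moved by raw copying;
    # anything with a more complex layout (e.g. DRBD) is rejected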
    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx)

    _CheckNodeOnline(self, target_node)
    _CheckNodeNotDrained(self, target_node)

    if instance.admin_up:
      # check memory requirements on the target node
      _CheckNodeFreeMemory(self, target_node, "moving instance %s" %
                           instance.name, bep[constants.BE_MEMORY],
                           instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the target node as"
                   " instance will not be started")

    # check bridge existence
    _CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

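    # step 1: stop the instance on the source node; a failed shutdown is
    # only a warning if the caller set ignore_consistency, otherwise the
    # operation aborts here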
    result = self.rpc.call_instance_shutdown(source_node, instance)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.proc.LogWarning("Could not shutdown instance %s on node %s."
                             " Proceeding anyway. Please make sure node"
                             " %s is down. Error details: %s",
                             instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      _CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed, reverting...")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise

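    # step 2: assemble each new disk on the target node and have the
    # source node export its data into it, one disk at a time; the
    # cluster name is needed by the inter-node copy in blockdev_export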
    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, disk,
                                               instance.name, True)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, disk,
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

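    # a failure on any disk aborts the move: remove the half-written
    # target disks so no orphaned volumes are left behind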
    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        _RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

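    # all data has been copied; flip the instance over to the target node
    # in the configuration before destroying the source disks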
    instance.primary_node = target_node
    self.cfg.Update(instance)

    self.LogInfo("Removing the disks on the original node")
    _RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_up:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = _AssembleInstanceDisks(self, instance,
                                           ignore_secondaries=True)
      if not disks_ok:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node, instance, None, None)
      msg = result.fail_msg
      if msg:
        _ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))

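
# Illustrative sketch, not part of the original module: a client would
# drive this LU by submitting the matching opcode over the luxi
# interface. The field names below are assumed from _OP_REQP above and
# may differ between Ganeti versions.
#
#   from ganeti import opcodes, luxi
#   op = opcodes.OpMoveInstance(instance_name="inst1.example.com",
#                               target_node="node2.example.com")
#   luxi.Client().SubmitJob([op])
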

class LUMigrateNode(LogicalUnit):
  """Migrate all instances from a node.
