+ return output
+
+
+class LUFailoverInstance(LogicalUnit):
+ """Failover an instance.
+
+ """
+ HPATH = "instance-failover"
+ HTYPE = constants.HTYPE_INSTANCE
+ _OP_REQP = ["instance_name", "ignore_consistency"]
+ REQ_BGL = False
+
+ def CheckArguments(self):
+ """Check the arguments.
+
+ """
+ self.shutdown_timeout = getattr(self.op, "shutdown_timeout",
+ constants.DEFAULT_SHUTDOWN_TIMEOUT)
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
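+ # the node locks cannot be computed yet; they are filled in by
+ # DeclareLocks once the instance's nodes are known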
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()
+
+ def BuildHooksEnv(self):
+ """Build hooks env.
+
+ This runs on master, primary and secondary nodes of the instance.
+
+ """
+ env = {
+ "IGNORE_CONSISTENCY": self.op.ignore_consistency,
+ "SHUTDOWN_TIMEOUT": self.shutdown_timeout,
+ }
+ env.update(_BuildInstanceHookEnvByObject(self, self.instance))
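+ # hooks run on the master node and on the failover target (secondary)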
+ nl = [self.cfg.GetMasterNode()] + list(self.instance.secondary_nodes)
+ return env, nl, nl
+
+ def CheckPrereq(self):
+ """Check prerequisites.
+
+ This checks that the instance is in the cluster and that failover is
+ possible: the disk template must be network mirrored, and the target
+ node must be online, not drained, and (if the instance is up) have
+ enough free memory.
+
+ """
+ self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+ assert self.instance is not None, \
+ "Cannot retrieve locked instance %s" % self.op.instance_name
+
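+ # backend parameters with cluster defaults filled in; needed for the
+ # memory check on the target node below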
+ bep = self.cfg.GetClusterInfo().FillBE(instance)
+ if instance.disk_template not in constants.DTS_NET_MIRROR:
+ raise errors.OpPrereqError("Instance's disk layout is not"
+ " network mirrored, cannot failover.",
+ errors.ECODE_STATE)
+
+ secondary_nodes = instance.secondary_nodes
+ if not secondary_nodes:
+ raise errors.ProgrammerError("no secondary node but using "
+ "a mirrored disk template")
+
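+ # mirrored (DRBD) instances have exactly one secondary, which is the
+ # failover target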
+ target_node = secondary_nodes[0]
+ _CheckNodeOnline(self, target_node)
+ _CheckNodeNotDrained(self, target_node)
+ if instance.admin_up:
+ # check memory requirements on the secondary node
+ _CheckNodeFreeMemory(self, target_node, "failing over instance %s" %
+ instance.name, bep[constants.BE_MEMORY],
+ instance.hypervisor)
+ else:
+ self.LogInfo("Not checking memory on the secondary node as"
+ " instance will not be started")
+
+ # check bridge existence
+ _CheckInstanceBridgesExist(self, instance, node=target_node)
+
+ def Exec(self, feedback_fn):
+ """Failover an instance.
+
+ The failover is done by shutting the instance down on its present
+ node and starting it on the secondary.
+
+ """
+ instance = self.instance
+
+ source_node = instance.primary_node
+ target_node = instance.secondary_nodes[0]
+
+ if instance.admin_up:
+ feedback_fn("* checking disk consistency between source and target")
+ for dev in instance.disks:
+ # for drbd, these are drbd over lvm
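+ # the last argument is on_primary=False, as the target node is
+ # (still) the secondary at this point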
+ if not _CheckDiskConsistency(self, dev, target_node, False):
+ if not self.op.ignore_consistency:
+ raise errors.OpExecError("Disk %s is degraded on target node,"
+ " aborting failover." % dev.iv_name)
+ else:
+ feedback_fn("* not checking disk consistency as instance is not running")
+
+ feedback_fn("* shutting down instance on source node")
+ logging.info("Shutting down instance %s on node %s",
+ instance.name, source_node)
+
+ result = self.rpc.call_instance_shutdown(source_node, instance,
+ self.shutdown_timeout)
+ msg = result.fail_msg
+ if msg:
+ if self.op.ignore_consistency:
+ self.proc.LogWarning("Could not shut down instance %s on node %s."
+ " Proceeding anyway. Please make sure node"
+ " %s is down. Error details: %s",
+ instance.name, source_node, source_node, msg)
+ else:
+ raise errors.OpExecError("Could not shutdown instance %s on"
+ " node %s: %s" %
+ (instance.name, source_node, msg))
+
+ feedback_fn("* deactivating the instance's disks on source node")
+ if not _ShutdownInstanceDisks(self, instance, ignore_primary=True):
+ raise errors.OpExecError("Can't shut down the instance's disks.")
+
+ instance.primary_node = target_node
+ # distribute new instance config to the other nodes
+ self.cfg.Update(instance, feedback_fn)
+
+ # Only start the instance if it's marked as up
+ if instance.admin_up:
+ feedback_fn("* activating the instance's disks on target node")
+ logging.info("Starting instance %s on node %s",
+ instance.name, target_node)
+
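+ # the old primary is now a secondary; errors from it are not fatal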
+ disks_ok, _ = _AssembleInstanceDisks(self, instance,
+ ignore_secondaries=True)
+ if not disks_ok:
+ _ShutdownInstanceDisks(self, instance)
+ raise errors.OpExecError("Can't activate the instance's disks")
+
+ feedback_fn("* starting the instance on the target node")
+ result = self.rpc.call_instance_start(target_node, instance, None, None)
+ msg = result.fail_msg
+ if msg:
+ _ShutdownInstanceDisks(self, instance)
+ raise errors.OpExecError("Could not start instance %s on node %s: %s" %
+ (instance.name, target_node, msg))
+
+
+class LUMigrateInstance(LogicalUnit):
+ """Migrate an instance.
+
+ This is a migration without shutting the instance down, as opposed
+ to a failover, which requires a shutdown.
+
+ """
+ HPATH = "instance-migrate"
+ HTYPE = constants.HTYPE_INSTANCE
+ _OP_REQP = ["instance_name", "live", "cleanup"]
+ REQ_BGL = False
+
+ def ExpandNames(self):
+ self._ExpandAndLockInstance()
+
+ self.needed_locks[locking.LEVEL_NODE] = []
+ self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
+
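+ # the actual work is delegated to the TLMigrateInstance tasklet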
+ self._migrater = TLMigrateInstance(self, self.op.instance_name,
+ self.op.live, self.op.cleanup)
+ self.tasklets = [self._migrater]
+
+ def DeclareLocks(self, level):
+ if level == locking.LEVEL_NODE:
+ self._LockInstancesNodes()