ignore_consistency=opts.ignore_consistency,
shutdown_timeout=opts.shutdown_timeout,
iallocator=iallocator,
- target_node=target_node)
+ target_node=target_node,
+ ignore_ipolicy=opts.ignore_ipolicy)
SubmitOrSend(op, opts, cl=cl)
return 0
"failover": (
FailoverInstance, ARGS_ONE_INSTANCE,
[FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT,
- DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT],
+ DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
+ IGNORE_IPOLICY_OPT],
"[-f] <instance>", "Stops the instance, changes its primary node and"
" (if it was originally running) starts it on the new node"
" (the secondary for mirrored instances or any node"
cleanup=False,
failover=True,
ignore_consistency=ignore_consistency,
- shutdown_timeout=shutdown_timeout)
+ shutdown_timeout=shutdown_timeout,
+ ignore_ipolicy=self.op.ignore_ipolicy)
self.tasklets = [self._migrater]
def DeclareLocks(self, level):
and target node
@type shutdown_timeout: int
@ivar shutdown_timeout: In case of failover timeout of the shutdown
+ @type ignore_ipolicy: bool
+ @ivar ignore_ipolicy: If true, instance policy violations can be ignored when migrating or failing over
"""
def __init__(self, lu, instance_name, cleanup=False,
failover=False, fallback=False,
ignore_consistency=False,
- shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT):
+ shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
+ ignore_ipolicy=False):
"""Initializes this class.
"""
self.fallback = fallback
self.ignore_consistency = ignore_consistency
self.shutdown_timeout = shutdown_timeout
+ self.ignore_ipolicy = ignore_ipolicy
def CheckPrereq(self):
"""Check prerequisites.
instance = self.cfg.GetInstanceInfo(instance_name)
assert instance is not None
self.instance = instance
+ cluster = self.cfg.GetClusterInfo()
if (not self.cleanup and
not instance.admin_state == constants.ADMINST_UP and
# BuildHooksEnv
self.target_node = self.lu.op.target_node
+ # Check that the target node is correct in terms of instance policy
+ nodeinfo = self.cfg.GetNodeInfo(self.target_node)
+ ipolicy = _CalculateGroupIPolicy(cluster, nodeinfo.group)
+ _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+ ignore=self.ignore_ipolicy)
+
# self.target_node is already populated, either directly or by the
# iallocator run
target_node = self.target_node
" node can be passed)" %
(instance.disk_template, text),
errors.ECODE_INVAL)
+ nodeinfo = self.cfg.GetNodeInfo(target_node)
+ ipolicy = _CalculateGroupIPolicy(cluster, nodeinfo.group)
+ _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo,
+ ignore=self.ignore_ipolicy)
- i_be = self.cfg.GetClusterInfo().FillBE(instance)
+ i_be = cluster.FillBE(instance)
# check memory requirements on the secondary node
if not self.failover or instance.admin_state == constants.ADMINST_UP:
self.lu.op.live = None
elif self.lu.op.mode is None:
# read the default value from the hypervisor
- i_hv = self.cfg.GetClusterInfo().FillHV(self.instance,
- skip_globals=False)
+ i_hv = cluster.FillHV(self.instance, skip_globals=False)
self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE]
self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE
"""Run the allocator based on input opcode.
"""
+ # FIXME: pass an ignore_ipolicy option in the iallocator request (self.ignore_ipolicy is not honored here)
ial = IAllocator(self.cfg, self.rpc,
mode=constants.IALLOCATOR_MODE_RELOC,
name=self.instance_name,
_PHvState = ("hv_state", None, ht.TMaybeDict, "Set hypervisor states")
_PDiskState = ("disk_state", None, ht.TMaybeDict, "Set disk states")
+
+_PIgnoreIpolicy = ("ignore_ipolicy", False, ht.TBool,
+ "Whether to ignore ipolicy violations")
+
#: OP_ID conversion regular expression
_OPID_RE = re.compile("([a-z])([A-Z])")
_PShutdownTimeout,
_PIgnoreConsistency,
_PMigrationTargetNode,
+ _PIgnoreIpolicy,
("iallocator", None, ht.TMaybeString,
"Iallocator for deciding the target node for shared-storage instances"),
]