from ganeti.masterd import iallocator
from ganeti import utils
from ganeti.cmdlib.base import LogicalUnit, Tasklet
-from ganeti.cmdlib.common import ExpandInstanceName, \
+from ganeti.cmdlib.common import ExpandInstanceUuidAndName, \
CheckIAllocatorOrNode, ExpandNodeUuidAndName
from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
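# Note: ExpandInstanceUuidAndName supersedes ExpandInstanceName. Instead of
# returning only the expanded instance name it yields an (instance_uuid,
# instance_name) pair. A minimal usage sketch, assuming the cmdlib/common.py
# helper semantics (passing None as the expected UUID skips the consistency
# check against the configuration):
#
#   (inst_uuid, inst_name) = \
#     ExpandInstanceUuidAndName(self.cfg, None, self.op.instance_name)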
if level == locking.LEVEL_NODE_ALLOC:
assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
- instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
+ instance = lu.cfg.GetInstanceInfo(lu.op.instance_uuid)
# Node locks are already declared here rather than at LEVEL_NODE as we need
# the instance object anyway to declare the node allocation lock.
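# (A sketch of what the instance object is needed for, assuming the usual
#  needed_locks idiom in this function: lu.needed_locks[locking.LEVEL_NODE]
#  is seeded from instance.primary_node and instance.secondary_nodes.)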
_ExpandNamesForMigration(self)
self._migrater = \
- TLMigrateInstance(self, self.op.instance_name, False, True, False,
+ TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
+ self.op.cleanup, True, False,
self.op.ignore_consistency, True,
self.op.shutdown_timeout, self.op.ignore_ipolicy)
"SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
"OLD_PRIMARY": self.cfg.GetNodeName(source_node_uuid),
"NEW_PRIMARY": self.op.target_node,
+ "FAILOVER_CLEANUP": self.op.cleanup,
}
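# Hook scripts see these keys with the standard GANETI_ prefix, so the new
# entry is exported as GANETI_FAILOVER_CLEANUP in the hook environment.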
if instance.disk_template in constants.DTS_INT_MIRROR:
_ExpandNamesForMigration(self)
self._migrater = \
- TLMigrateInstance(self, self.op.instance_name, self.op.cleanup,
- False, self.op.allow_failover, False,
+ TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
+ self.op.cleanup, False, self.op.allow_failover, False,
self.op.allow_runtime_changes,
constants.DEFAULT_SHUTDOWN_TIMEOUT,
self.op.ignore_ipolicy)
_MIGRATION_POLL_INTERVAL = 1 # seconds
_MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
- def __init__(self, lu, instance_name, cleanup, failover, fallback,
- ignore_consistency, allow_runtime_changes, shutdown_timeout,
- ignore_ipolicy):
+ def __init__(self, lu, instance_uuid, instance_name, cleanup, failover,
+ fallback, ignore_consistency, allow_runtime_changes,
+ shutdown_timeout, ignore_ipolicy):
"""Initializes this class.
"""
Tasklet.__init__(self, lu)
# Parameters
+ self.instance_uuid = instance_uuid
self.instance_name = instance_name
self.cleanup = cleanup
self.live = False # will be overridden later
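# For reference, both call sites above pass these arguments positionally:
# the failover LU maps to cleanup=self.op.cleanup, failover=True,
# fallback=False, allow_runtime_changes=True, while the migrate LU maps to
# cleanup=self.op.cleanup, failover=False, fallback=self.op.allow_failover,
# ignore_consistency=False.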
This checks that the instance is in the cluster.
"""
- instance_name = ExpandInstanceName(self.lu.cfg, self.instance_name)
- self.instance = self.cfg.GetInstanceInfo(instance_name)
+ (self.instance_uuid, self.instance_name) = \
+ ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
+ self.instance_name)
+ self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
assert self.instance is not None
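# The configuration lookup is now keyed by UUID; the expanded name is kept in
# self.instance_name for the name-based instance locks and user-facing
# messages.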
cluster = self.cfg.GetClusterInfo()
# FIXME: add a self.ignore_ipolicy option
req = iallocator.IAReqRelocate(
- name=self.instance_name,
+ inst_uuid=self.instance_uuid,
relocate_from_node_uuids=[self.instance.primary_node])
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
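# The relocation request now identifies the instance by UUID instead of by
# name; the allocator is presumably still run as before, e.g.
# ial.Run(self.lu.op.iallocator), with ial.success checked afterwards.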
hvspecs = [(self.instance.hypervisor,
self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])]
nodeinfo = self.rpc.call_node_info(
- [self.source_node_uuid, self.target_node_uuid], None, hvspecs,
- False)
+ [self.source_node_uuid, self.target_node_uuid], None, hvspecs)
for ninfo in nodeinfo.values():
ninfo.Raise("Unable to retrieve node information from node '%s'" %
ninfo.node)
self.feedback_fn("* preparing %s to accept the instance" %
self.cfg.GetNodeName(self.target_node_uuid))
+ # This fills the physical_id slot, which may be missing on newly created disks
+ for disk in self.instance.disks:
+ self.cfg.SetDiskID(disk, self.target_node_uuid)
result = self.rpc.call_accept_instance(self.target_node_uuid,
self.instance,
migration_info,