Merge branch 'stable-2.8' into stable-2.9
diff --git a/lib/cmdlib/instance_migration.py b/lib/cmdlib/instance_migration.py
index 44efdae..7d45b4f 100644
--- a/lib/cmdlib/instance_migration.py
+++ b/lib/cmdlib/instance_migration.py
@@ -30,7 +30,7 @@ from ganeti import locking
 from ganeti.masterd import iallocator
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, Tasklet
-from ganeti.cmdlib.common import ExpandInstanceName, \
+from ganeti.cmdlib.common import ExpandInstanceUuidAndName, \
   CheckIAllocatorOrNode, ExpandNodeUuidAndName
 from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
   ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
@@ -72,7 +72,7 @@ def _DeclareLocksForMigration(lu, level):
   if level == locking.LEVEL_NODE_ALLOC:
     assert lu.op.instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
 
-    instance = lu.cfg.GetInstanceInfo(lu.op.instance_name)
+    instance = lu.cfg.GetInstanceInfo(lu.op.instance_uuid)
 
     # Node locks are already declared here rather than at LEVEL_NODE as we need
     # the instance object anyway to declare the node allocation lock.
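As the comment notes, the instance object fetched here (now looked up by UUID) is reused right away to declare the node locks. A minimal sketch of that pattern, assuming the standard LogicalUnit attribute needed_locks and the instance fields primary_node/secondary_nodes, none of which appear in this hunk:

```python
# Illustrative sketch only: lu.needed_locks, primary_node and secondary_nodes
# are assumptions, not part of this hunk.
instance = lu.cfg.GetInstanceInfo(lu.op.instance_uuid)
lu.needed_locks[locking.LEVEL_NODE] = \
  [instance.primary_node] + list(instance.secondary_nodes)
```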
@@ -118,7 +118,8 @@ class LUInstanceFailover(LogicalUnit):
     _ExpandNamesForMigration(self)
 
     self._migrater = \
-      TLMigrateInstance(self, self.op.instance_name, False, True, False,
+      TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
+                        self.op.cleanup, True, False,
                         self.op.ignore_consistency, True,
                         self.op.shutdown_timeout, self.op.ignore_ipolicy)
 
@@ -140,6 +141,7 @@ class LUInstanceFailover(LogicalUnit):
       "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
       "OLD_PRIMARY": self.cfg.GetNodeName(source_node_uuid),
       "NEW_PRIMARY": self.op.target_node,
+      "FAILOVER_CLEANUP": self.op.cleanup,
       }
 
     if instance.disk_template in constants.DTS_INT_MIRROR:
@@ -177,8 +179,8 @@ class LUInstanceMigrate(LogicalUnit):
     _ExpandNamesForMigration(self)
 
     self._migrater = \
-      TLMigrateInstance(self, self.op.instance_name, self.op.cleanup,
-                        False, self.op.allow_failover, False,
+      TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
+                        self.op.cleanup, False, self.op.allow_failover, False,
                         self.op.allow_runtime_changes,
                         constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         self.op.ignore_ipolicy)
@@ -255,15 +257,16 @@ class TLMigrateInstance(Tasklet):
   _MIGRATION_POLL_INTERVAL = 1      # seconds
   _MIGRATION_FEEDBACK_INTERVAL = 10 # seconds
 
-  def __init__(self, lu, instance_name, cleanup, failover, fallback,
-               ignore_consistency, allow_runtime_changes, shutdown_timeout,
-               ignore_ipolicy):
+  def __init__(self, lu, instance_uuid, instance_name, cleanup, failover,
+               fallback, ignore_consistency, allow_runtime_changes,
+               shutdown_timeout, ignore_ipolicy):
     """Initializes this class.
 
     """
     Tasklet.__init__(self, lu)
 
     # Parameters
+    self.instance_uuid = instance_uuid
     self.instance_name = instance_name
     self.cleanup = cleanup
     self.live = False # will be overridden later
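With the extra parameter, callers now hand both the UUID and the name to the tasklet up front. A hedged sketch of the updated call, mirroring the LUInstanceMigrate hunk above and using keyword arguments only to make the positional mapping explicit:

```python
# Sketch of the updated argument order; the values mirror the
# LUInstanceMigrate call shown earlier in this diff.
migrater = TLMigrateInstance(self, self.op.instance_uuid, self.op.instance_name,
                             cleanup=self.op.cleanup, failover=False,
                             fallback=self.op.allow_failover,
                             ignore_consistency=False,
                             allow_runtime_changes=self.op.allow_runtime_changes,
                             shutdown_timeout=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                             ignore_ipolicy=self.op.ignore_ipolicy)
```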
@@ -280,8 +283,10 @@ class TLMigrateInstance(Tasklet):
     This checks that the instance is in the cluster.
 
     """
-    instance_name = ExpandInstanceName(self.lu.cfg, self.instance_name)
-    self.instance = self.cfg.GetInstanceInfo(instance_name)
+    (self.instance_uuid, self.instance_name) = \
+      ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
+                                self.instance_name)
+    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
     assert self.instance is not None
     cluster = self.cfg.GetClusterInfo()
 
@@ -448,7 +453,7 @@ class TLMigrateInstance(Tasklet):
 
     # FIXME: add a self.ignore_ipolicy option
     req = iallocator.IAReqRelocate(
-          name=self.instance_name,
+          inst_uuid=self.instance_uuid,
           relocate_from_node_uuids=[self.instance.primary_node])
     ial = iallocator.IAllocator(self.cfg, self.rpc, req)
 
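Relocation requests to the iallocator are likewise keyed by UUID now. A short consolidated sketch of the request plus allocator construction, restricted to the arguments visible in this hunk:

```python
# Sketch: relocation request for the instance resolved in CheckPrereq; only
# inst_uuid and relocate_from_node_uuids are confirmed by this hunk.
req = iallocator.IAReqRelocate(
        inst_uuid=self.instance_uuid,
        relocate_from_node_uuids=[self.instance.primary_node])
ial = iallocator.IAllocator(self.cfg, self.rpc, req)
```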
@@ -657,8 +662,7 @@ class TLMigrateInstance(Tasklet):
     hvspecs = [(self.instance.hypervisor,
                 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])]
     nodeinfo = self.rpc.call_node_info(
-                 [self.source_node_uuid, self.target_node_uuid], None, hvspecs,
-                 False)
+                 [self.source_node_uuid, self.target_node_uuid], None, hvspecs)
     for ninfo in nodeinfo.values():
       ninfo.Raise("Unable to retrieve node information from node '%s'" %
                   ninfo.node)
@@ -717,6 +721,9 @@ class TLMigrateInstance(Tasklet):
 
     self.feedback_fn("* preparing %s to accept the instance" %
                      self.cfg.GetNodeName(self.target_node_uuid))
+    # This fills physical_id slot that may be missing on newly created disks
+    for disk in self.instance.disks:
+      self.cfg.SetDiskID(disk, self.target_node_uuid)
     result = self.rpc.call_accept_instance(self.target_node_uuid,
                                            self.instance,
                                            migration_info,