Revision 5eacbcae lib/cmdlib/instance_migration.py

--- a/lib/cmdlib/instance_migration.py
+++ b/lib/cmdlib/instance_migration.py
@@ -30,13 +30,13 @@
 from ganeti.masterd import iallocator
 from ganeti import utils
 from ganeti.cmdlib.base import LogicalUnit, Tasklet
-from ganeti.cmdlib.common import _ExpandInstanceName, \
-  _CheckIAllocatorOrNode, _ExpandNodeName
-from ganeti.cmdlib.instance_storage import _CheckDiskConsistency, \
-  _ExpandCheckDisks, _ShutdownInstanceDisks, _AssembleInstanceDisks
-from ganeti.cmdlib.instance_utils import _BuildInstanceHookEnvByObject, \
-  _CheckTargetNodeIPolicy, _ReleaseLocks, _CheckNodeNotDrained, \
-  _CopyLockList, _CheckNodeFreeMemory, _CheckInstanceBridgesExist
+from ganeti.cmdlib.common import ExpandInstanceName, \
+  CheckIAllocatorOrNode, ExpandNodeName
+from ganeti.cmdlib.instance_storage import CheckDiskConsistency, \
+  ExpandCheckDisks, ShutdownInstanceDisks, AssembleInstanceDisks
+from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
+  CheckTargetNodeIPolicy, ReleaseLocks, CheckNodeNotDrained, \
+  CopyLockList, CheckNodeFreeMemory, CheckInstanceBridgesExist
 
 import ganeti.masterd.instance
 
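The hunk above only renames the imported helpers: the leading underscore is dropped, while signatures and behaviour stay the same. Any out-of-tree code that still imports the old private names will now fail at import time. A minimal sketch of a transitional alias module, purely illustrative and not part of this revision:

    # compat_cmdlib.py -- hypothetical shim, not shipped with Ganeti.
    # Re-export a few of the renamed helpers under their old
    # underscore-prefixed names so stale call sites keep importing
    # until they are updated to the public names.
    from ganeti.cmdlib.common import (
      ExpandInstanceName as _ExpandInstanceName,
      CheckIAllocatorOrNode as _CheckIAllocatorOrNode,
      ExpandNodeName as _ExpandNodeName,
    )
    from ganeti.cmdlib.instance_utils import (
      BuildInstanceHookEnvByObject as _BuildInstanceHookEnvByObject,
      ReleaseLocks as _ReleaseLocks,
    )

Once the remaining call sites use the public names, such a shim would simply be deleted.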
@@ -48,7 +48,7 @@
 
   """
   if lu.op.target_node is not None:
-    lu.op.target_node = _ExpandNodeName(lu.cfg, lu.op.target_node)
+    lu.op.target_node = ExpandNodeName(lu.cfg, lu.op.target_node)
 
   lu.needed_locks[locking.LEVEL_NODE] = []
   lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
@@ -94,7 +94,7 @@
   elif level == locking.LEVEL_NODE_RES:
     # Copy node locks
     lu.needed_locks[locking.LEVEL_NODE_RES] = \
-      _CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
+      CopyLockList(lu.needed_locks[locking.LEVEL_NODE])
 
 
 class LUInstanceFailover(LogicalUnit):
@@ -148,7 +148,7 @@
     else:
       env["OLD_SECONDARY"] = env["NEW_SECONDARY"] = ""
 
-    env.update(_BuildInstanceHookEnvByObject(self, instance))
+    env.update(BuildInstanceHookEnvByObject(self, instance))
 
     return env
 
@@ -197,7 +197,7 @@
     instance = self._migrater.instance
     source_node = instance.primary_node
     target_node = self.op.target_node
-    env = _BuildInstanceHookEnvByObject(self, instance)
+    env = BuildInstanceHookEnvByObject(self, instance)
     env.update({
       "MIGRATE_LIVE": self._migrater.live,
       "MIGRATE_CLEANUP": self.op.cleanup,
@@ -280,7 +280,7 @@
     This checks that the instance is in the cluster.
 
     """
-    instance_name = _ExpandInstanceName(self.lu.cfg, self.instance_name)
+    instance_name = ExpandInstanceName(self.lu.cfg, self.instance_name)
     instance = self.cfg.GetInstanceInfo(instance_name)
     assert instance is not None
     self.instance = instance
@@ -303,7 +303,7 @@
                                   errors.ECODE_STATE)
 
     if instance.disk_template in constants.DTS_EXT_MIRROR:
-      _CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
+      CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")
 
       if self.lu.op.iallocator:
         assert locking.NAL in self.lu.owned_locks(locking.LEVEL_NODE_ALLOC)
@@ -318,8 +318,8 @@
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
-                              ignore=self.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
+                             ignore=self.ignore_ipolicy)
 
       # self.target_node is already populated, either directly or by the
       # iallocator run
@@ -333,9 +333,9 @@
       if len(self.lu.tasklets) == 1:
         # It is safe to release locks only when we're the only tasklet
         # in the LU
-        _ReleaseLocks(self.lu, locking.LEVEL_NODE,
-                      keep=[instance.primary_node, self.target_node])
-        _ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
+        ReleaseLocks(self.lu, locking.LEVEL_NODE,
+                     keep=[instance.primary_node, self.target_node])
+        ReleaseLocks(self.lu, locking.LEVEL_NODE_ALLOC)
 
     else:
       assert not self.lu.glm.is_owned(locking.LEVEL_NODE_ALLOC)
@@ -362,19 +362,19 @@
       group_info = self.cfg.GetNodeGroup(nodeinfo.group)
       ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                               group_info)
-      _CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
-                              ignore=self.ignore_ipolicy)
+      CheckTargetNodeIPolicy(self.lu, ipolicy, instance, nodeinfo, self.cfg,
+                             ignore=self.ignore_ipolicy)
 
     i_be = cluster.FillBE(instance)
 
     # check memory requirements on the secondary node
     if (not self.cleanup and
          (not self.failover or instance.admin_state == constants.ADMINST_UP)):
-      self.tgt_free_mem = _CheckNodeFreeMemory(self.lu, target_node,
-                                               "migrating instance %s" %
-                                               instance.name,
-                                               i_be[constants.BE_MINMEM],
-                                               instance.hypervisor)
+      self.tgt_free_mem = CheckNodeFreeMemory(self.lu, target_node,
+                                              "migrating instance %s" %
+                                              instance.name,
+                                              i_be[constants.BE_MINMEM],
+                                              instance.hypervisor)
     else:
       self.lu.LogInfo("Not checking memory on the secondary node as"
                       " instance will not be started")
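Because CheckNodeFreeMemory and the other helpers touched here are imported into this module under their public names, tests that stub them out must patch the name as seen from instance_migration rather than the old underscore-prefixed name defined in instance_utils. A minimal sketch, assuming a plain unittest.mock setup; the test body itself is illustrative:

    from unittest import mock

    # Patch the helper where it is used (instance_migration), not where it
    # is defined, and return a fake amount of free memory on the target node.
    with mock.patch("ganeti.cmdlib.instance_migration.CheckNodeFreeMemory",
                    return_value=2048):
      pass  # construct the migration tasklet and drive its CheckPrereq() here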
@@ -387,10 +387,10 @@
       self.failover = True
 
     # check bridge existance
-    _CheckInstanceBridgesExist(self.lu, instance, node=target_node)
+    CheckInstanceBridgesExist(self.lu, instance, node=target_node)
 
     if not self.cleanup:
-      _CheckNodeNotDrained(self.lu, target_node)
+      CheckNodeNotDrained(self.lu, target_node)
       if not self.failover:
         result = self.rpc.call_instance_migratable(instance.primary_node,
                                                    instance)
@@ -671,7 +671,7 @@
 
     self.feedback_fn("* checking disk consistency between source and target")
     for (idx, dev) in enumerate(instance.disks):
-      if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
+      if not CheckDiskConsistency(self.lu, instance, dev, target_node, False):
         raise errors.OpExecError("Disk %s is degraded or not fully"
                                  " synchronized on target node,"
                                  " aborting migration" % idx)
@@ -805,7 +805,7 @@
     # If the instance's disk template is `rbd' or `ext' and there was a
     # successful migration, unmap the device from the source node.
     if self.instance.disk_template in (constants.DT_RBD, constants.DT_EXT):
-      disks = _ExpandCheckDisks(instance, instance.disks)
+      disks = ExpandCheckDisks(instance, instance.disks)
       self.feedback_fn("* unmapping instance's disks from %s" % source_node)
       for disk in disks:
         result = self.rpc.call_blockdev_shutdown(source_node, (disk, instance))
@@ -836,8 +836,8 @@
       self.feedback_fn("* checking disk consistency between source and target")
       for (idx, dev) in enumerate(instance.disks):
         # for drbd, these are drbd over lvm
-        if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
-                                     False):
+        if not CheckDiskConsistency(self.lu, instance, dev, target_node,
+                                    False):
           if primary_node.offline:
             self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                              " target node %s" %
@@ -869,7 +869,7 @@
                                  (instance.name, source_node, msg))
 
     self.feedback_fn("* deactivating the instance's disks on source node")
-    if not _ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
+    if not ShutdownInstanceDisks(self.lu, instance, ignore_primary=True):
       raise errors.OpExecError("Can't shut down the instance's disks")
 
     instance.primary_node = target_node
@@ -883,10 +883,10 @@
       logging.info("Starting instance %s on node %s",
                    instance.name, target_node)
 
-      disks_ok, _ = _AssembleInstanceDisks(self.lu, instance,
-                                           ignore_secondaries=True)
+      disks_ok, _ = AssembleInstanceDisks(self.lu, instance,
+                                          ignore_secondaries=True)
       if not disks_ok:
-        _ShutdownInstanceDisks(self.lu, instance)
+        ShutdownInstanceDisks(self.lu, instance)
         raise errors.OpExecError("Can't activate the instance's disks")
 
       self.feedback_fn("* starting the instance on the target node %s" %
@@ -895,7 +895,7 @@
                                             False, self.lu.op.reason)
       msg = result.fail_msg
       if msg:
-        _ShutdownInstanceDisks(self.lu, instance)
+        ShutdownInstanceDisks(self.lu, instance)
         raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                  (instance.name, target_node, msg))
 

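Since the rename is purely mechanical, a quick way to confirm that no call site under lib/cmdlib still references the removed underscore-prefixed names is a throwaway scan like the one below. It is not part of this revision; the name list is taken from the import hunk at the top of the diff.

    # leftover_check.py -- hypothetical helper, run from the source tree root.
    # Prints any remaining reference to the old private helper names and
    # exits non-zero if at least one is found.
    import re
    import sys
    from pathlib import Path

    OLD_NAMES = [
      "_ExpandInstanceName", "_CheckIAllocatorOrNode", "_ExpandNodeName",
      "_CheckDiskConsistency", "_ExpandCheckDisks", "_ShutdownInstanceDisks",
      "_AssembleInstanceDisks", "_BuildInstanceHookEnvByObject",
      "_CheckTargetNodeIPolicy", "_ReleaseLocks", "_CheckNodeNotDrained",
      "_CopyLockList", "_CheckNodeFreeMemory", "_CheckInstanceBridgesExist",
    ]
    pattern = re.compile(r"\b(%s)\b" % "|".join(OLD_NAMES))

    hits = 0
    for path in sorted(Path("lib/cmdlib").glob("*.py")):
      for lineno, line in enumerate(path.read_text().splitlines(), 1):
        if pattern.search(line):
          print("%s:%d: %s" % (path, lineno, line.strip()))
          hits += 1
    sys.exit(1 if hits else 0)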