Revision 642445d9 lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -3767,21 +3767,45 @@
         warning("Failed to shutdown drbd for %s on old node" % dev.iv_name,
                 "Please cleanup this device manually as soon as possible")
 
-      # we have new storage, we 'rename' the network on the primary
-      info("switching primary drbd for %s to new secondary node" % dev.iv_name)
+    info("detaching primary drbds from the network (=> standalone)")
+    done = 0
+    for dev in instance.disks:
       cfg.SetDiskID(dev, pri_node)
-      # rename to the ip of the new node
-      new_uid = list(dev.physical_id)
-      new_uid[2] = self.remote_node_info.secondary_ip
-      rlist = [(dev, tuple(new_uid))]
-      if not rpc.call_blockdev_rename(pri_node, rlist):
-        raise errors.OpExecError("Can't detach & re-attach drbd %s on node"
-                                 " %s from %s to %s" %
-                                 (dev.iv_name, pri_node, old_node, new_node))
-      dev.logical_id = (pri_node, new_node, dev.logical_id[2])
+      # set the physical (unique in bdev terms) id to None, meaning
+      # detach from network
+      dev.physical_id = (None,) * len(dev.physical_id)
+      # and 'find' the device, which will 'fix' it to match the
+      # standalone state
+      if rpc.call_blockdev_find(pri_node, dev):
+        done += 1
+      else:
+        warning("Failed to detach drbd %s from network, unusual case" %
+                dev.iv_name)
+
+    if not done:
+      # no detaches succeeded (very unlikely)
+      raise errors.OpExecError("Can't detach at least one DRBD from old node")
+
+    # if we managed to detach at least one, we update all the disks of
+    # the instance to point to the new secondary
+    info("updating instance configuration")
+    for dev in instance.disks:
+      dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
       cfg.SetDiskID(dev, pri_node)
-      cfg.Update(instance)
+    cfg.Update(instance)
 
+    # and now perform the drbd attach
+    info("attaching primary drbds to new secondary (standalone => connected)")
+    failures = []
+    for dev in instance.disks:
+      info("attaching primary drbd for %s to new secondary node" % dev.iv_name)
+      # since the attach is smart, it's enough to 'find' the device,
+      # it will automatically activate the network, if the physical_id
+      # is correct
+      cfg.SetDiskID(dev, pri_node)
+      if not rpc.call_blockdev_find(pri_node, dev):
+        warning("can't attach drbd %s to new secondary!" % dev.iv_name,
+                "please do a gnt-instance info to see the status of disks")
 
     # this can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its
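The change above replaces the old rename-based approach (rewriting the peer address via rpc.call_blockdev_rename) with an explicit standalone phase: blank out physical_id and 'find' the device so it drops its network configuration, update logical_id to point at the new secondary, then 'find' it again to reconnect. Below is a minimal, self-contained sketch of that three-step flow; the Disk class, set_physical_id() and the find_device callback are simplified stand-ins for Ganeti's objects.Disk, cfg.SetDiskID() and rpc.call_blockdev_find(), not the real implementations.

# Minimal sketch, NOT the real Ganeti objects: Disk and find_device below are
# simplified stand-ins for objects.Disk / cfg.SetDiskID / rpc.call_blockdev_find.

class Disk(object):
  def __init__(self, iv_name, logical_id):
    self.iv_name = iv_name
    self.logical_id = logical_id          # e.g. (node_a, node_b, port)
    self.physical_id = logical_id

  def set_physical_id(self):
    # stand-in for cfg.SetDiskID(): recompute physical_id from logical_id
    self.physical_id = self.logical_id


def switch_secondary(disks, pri_node, new_node, find_device):
  """Re-point DRBD disks from the old secondary to new_node.

  find_device(node, disk) stands in for rpc.call_blockdev_find(): it returns
  True if the device on 'node' could be (re)configured to match 'disk'.
  """
  # 1. detach: blank physical_id so 'find' drives the device standalone
  done = 0
  for dev in disks:
    dev.set_physical_id()
    dev.physical_id = (None,) * len(dev.physical_id)
    if find_device(pri_node, dev):
      done += 1
  if not done:
    raise RuntimeError("could not detach any DRBD device from the old node")

  # 2. point the configuration at the new secondary
  for dev in disks:
    dev.logical_id = (pri_node, new_node) + dev.logical_id[2:]
    dev.set_physical_id()

  # 3. attach: 'find' again, now with the new peer in physical_id
  for dev in disks:
    dev.set_physical_id()
    if not find_device(pri_node, dev):
      print("warning: could not attach %s to %s" % (dev.iv_name, new_node))


# Example run with a fake backend that always succeeds:
disks = [Disk("sda", ("node1", "node2", 11000))]
switch_secondary(disks, "node1", "node3", lambda node, dev: True)
assert disks[0].logical_id == ("node1", "node3", 11000)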
