Revision a2d59d8b lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -4647,6 +4647,11 @@
     old_node = self.tgt_node
     new_node = self.new_node
     pri_node = instance.primary_node
+    nodes_ip = {
+      old_node: self.cfg.GetNodeInfo(old_node).secondary_ip,
+      new_node: self.cfg.GetNodeInfo(new_node).secondary_ip,
+      pri_node: self.cfg.GetNodeInfo(pri_node).secondary_ip,
+      }
 
     # Step: check device activation
     self.proc.LogStep(1, steps_total, "check device existence")
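
The nodes_ip mapping built in this hunk feeds the net-aware DRBD RPCs
introduced further down, which need each node's secondary (replication)
IP. A hypothetical illustration of the resulting structure, with invented
node names and addresses:

    # Invented values for illustration only; the keys are the three nodes
    # involved in the secondary replacement, the values their replication IPs.
    nodes_ip = {
      "node-old.example.com": "192.0.2.1",   # old secondary (tgt_node)
      "node-new.example.com": "192.0.2.3",   # new secondary
      "node-pri.example.com": "192.0.2.2",   # primary
    }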
@@ -4705,20 +4710,24 @@
     for idx, (dev, new_minor) in enumerate(zip(instance.disks, minors)):
       size = dev.size
       info("activating a new drbd on %s for disk/%d" % (new_node, idx))
-      # create new devices on new_node
-      if pri_node == dev.logical_id[0]:
-        new_logical_id = (pri_node, new_node,
-                          dev.logical_id[2], dev.logical_id[3], new_minor,
-                          dev.logical_id[5])
+      # create new devices on new_node; note that we create two IDs:
+      # one without port, so the drbd will be activated without
+      # networking information on the new node at this stage, and one
+      # with network, for the latter activation in step 4
+      (o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret) = dev.logical_id
+      if pri_node == o_node1:
+        p_minor = o_minor1
       else:
-        new_logical_id = (new_node, pri_node,
-                          dev.logical_id[2], new_minor, dev.logical_id[4],
-                          dev.logical_id[5])
-      iv_names[idx] = (dev, dev.children, new_logical_id)
+        p_minor = o_minor2
+
+      new_alone_id = (pri_node, new_node, None, p_minor, new_minor, o_secret)
+      new_net_id = (pri_node, new_node, o_port, p_minor, new_minor, o_secret)
+
+      iv_names[idx] = (dev, dev.children, new_net_id)
       logging.debug("Allocated new_minor: %s, new_logical_id: %s", new_minor,
-                    new_logical_id)
+                    new_net_id)
       new_drbd = objects.Disk(dev_type=constants.LD_DRBD8,
-                              logical_id=new_logical_id,
+                              logical_id=new_alone_id,
                               children=dev.children)
       if not _CreateBlockDevOnSecondary(self, new_node, instance,
                                         new_drbd, False,
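
As the unpacking above shows, a DRBD8 logical_id is the six-tuple
(node_a, node_b, port, minor_a, minor_b, secret). The two IDs built in
this hunk differ only in the port field; a worked example with invented
nodes, port, minors and secret:

    # Hypothetical current id: primary is o_node1, old secondary o_node2
    o_node1, o_node2, o_port, o_minor1, o_minor2, o_secret = \
      ("pri", "old", 11000, 0, 1, "s3cr3t")
    p_minor = o_minor1   # pri_node == o_node1, so keep the primary's minor
    new_minor = 2        # minor reserved on the new secondary

    # port None => the device is brought up standalone on the new node
    new_alone_id = ("pri", "new", None, 0, 2, "s3cr3t")
    # the real port comes back when the devices reconnect in step 4
    new_net_id = ("pri", "new", 11000, 0, 2, "s3cr3t")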
@@ -4737,25 +4746,15 @@
                 hint="Please cleanup this device manually as soon as possible")
 
     info("detaching primary drbds from the network (=> standalone)")
-    done = 0
-    for idx, dev in enumerate(instance.disks):
-      cfg.SetDiskID(dev, pri_node)
-      # set the network part of the physical (unique in bdev terms) id
-      # to None, meaning detach from network
-      dev.physical_id = (None, None, None, None) + dev.physical_id[4:]
-      # and 'find' the device, which will 'fix' it to match the
-      # standalone state
-      result = self.rpc.call_blockdev_find(pri_node, dev)
-      if not result.failed and result.data:
-        done += 1
-      else:
-        warning("Failed to detach drbd disk/%d from network, unusual case" %
-                idx)
+    result = self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip,
+                                               instance.disks)[pri_node]
 
-    if not done:
-      # no detaches succeeded (very unlikely)
+    msg = result.RemoteFailMsg()
+    if msg:
+      # detaches didn't succeed (unlikely)
       self.cfg.ReleaseDRBDMinors(instance.name)
-      raise errors.OpExecError("Can't detach at least one DRBD from old node")
+      raise errors.OpExecError("Can't detach the disks from the network on"
+                               " old node: %s" % (msg,))
 
     # if we managed to detach at least one, we update all the disks of
     # the instance to point to the new secondary
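
The single disconnect RPC replaces the old per-disk blockdev_find loop:
one call covers all of the instance's disks, and errors come back through
RemoteFailMsg(), which returns a non-empty message on failure. A sketch of
that checking pattern (the helper name _CheckDrbdNetResult is invented for
illustration):

    def _CheckDrbdNetResult(result, action):
      # RemoteFailMsg() yields a false value on success, so one test
      # replaces the old "result.failed or not result.data" idiom.
      msg = result.RemoteFailMsg()
      if msg:
        raise errors.OpExecError("%s failed: %s" % (action, msg))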
@@ -4770,17 +4769,15 @@
 
     # and now perform the drbd attach
     info("attaching primary drbds to new secondary (standalone => connected)")
-    for idx, dev in enumerate(instance.disks):
-      info("attaching primary drbd for disk/%d to new secondary node" % idx)
-      # since the attach is smart, it's enough to 'find' the device,
-      # it will automatically activate the network, if the physical_id
-      # is correct
-      cfg.SetDiskID(dev, pri_node)
-      logging.debug("Disk to attach: %s", dev)
-      result = self.rpc.call_blockdev_find(pri_node, dev)
-      if result.failed or not result.data:
-        warning("can't attach drbd disk/%d to new secondary!" % idx,
-                "please do a gnt-instance info to see the status of disks")
+    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
+                                           instance.disks, instance.name,
+                                           False)
+    for to_node, to_result in result.items():
+      msg = to_result.RemoteFailMsg()
+      if msg:
+        warning("can't attach drbd disks on node %s: %s", to_node, msg,
+                hint="please do a gnt-instance info to see the"
+                " status of disks")
 
     # this can fail as the old devices are degraded and _WaitForSync
     # does a combined result over all disks, so we don't check its
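
Taken together, the secondary replacement now drives the network
transition in three bulk steps instead of per-disk find calls. A
condensed, hedged outline (error handling elided; the final False is
presumably the multimaster flag, which migration-style reconnects would
set):

    # 1) cut replication: the primary goes standalone towards the old
    #    secondary
    self.rpc.call_drbd_disconnect_net([pri_node], nodes_ip, instance.disks)

    # 2) update the instance's disks in the configuration to point at the
    #    new secondary (the unchanged code between the two hunks above)

    # 3) reconnect primary and new secondary with the full network ids
    result = self.rpc.call_drbd_attach_net([pri_node, new_node], nodes_ip,
                                           instance.disks, instance.name,
                                           False)
    for to_node, to_result in result.items():
      if to_result.RemoteFailMsg():
        pass   # warn per node, as in the hunk above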
