Revision abdf0113 lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2872,7 +2872,7 @@
     secondary_nodes = instance.secondary_nodes
     if not secondary_nodes:
       raise errors.ProgrammerError("no secondary node but using "
-                                   "DT_REMOTE_RAID1 template")
+                                   "a mirrored disk template")
 
     target_node = secondary_nodes[0]
     # check memory requirements on the secondary node
@@ -2902,7 +2902,7 @@
 
     feedback_fn("* checking disk consistency between source and target")
     for dev in instance.disks:
-      # for remote_raid1, these are md over drbd
+      # for drbd, these are drbd over lvm
       if not _CheckDiskConsistency(self.cfg, dev, target_node, False):
         if instance.status == "up" and not self.op.ignore_consistency:
           raise errors.OpExecError("Disk %s is degraded on target node,"
@@ -3751,13 +3751,6 @@
         # replacement as for drbd7 (no different port allocated)
         raise errors.OpPrereqError("Same secondary given, cannot execute"
                                    " replacement")
-      # the user gave the current secondary, switch to
-      # 'no-replace-secondary' mode for drbd7
-      remote_node = None
-    if (instance.disk_template == constants.DT_REMOTE_RAID1 and
-        self.op.mode != constants.REPLACE_DISK_ALL):
-      raise errors.OpPrereqError("Template 'remote_raid1' only allows all"
-                                 " disks replacement, not individual ones")
     if instance.disk_template == constants.DT_DRBD8:
       if (self.op.mode == constants.REPLACE_DISK_ALL and
           remote_node is not None):
@@ -3789,101 +3782,6 @@
                                    (name, instance.name))
     self.op.remote_node = remote_node
 
-  def _ExecRR1(self, feedback_fn):
-    """Replace the disks of an instance.
-
-    """
-    instance = self.instance
-    iv_names = {}
-    # start of work
-    if self.op.remote_node is None:
-      remote_node = self.sec_node
-    else:
-      remote_node = self.op.remote_node
-    cfg = self.cfg
-    for dev in instance.disks:
-      size = dev.size
-      lv_names = [".%s_%s" % (dev.iv_name, suf) for suf in ["data", "meta"]]
-      names = _GenerateUniqueNames(cfg, lv_names)
-      new_drbd = _GenerateMDDRBDBranch(cfg, instance.primary_node,
-                                       remote_node, size, names)
-      iv_names[dev.iv_name] = (dev, dev.children[0], new_drbd)
-      logger.Info("adding new mirror component on secondary for %s" %
-                  dev.iv_name)
-      #HARDCODE
-      if not _CreateBlockDevOnSecondary(cfg, remote_node, instance,
-                                        new_drbd, False,
-                                        _GetInstanceInfoText(instance)):
-        raise errors.OpExecError("Failed to create new component on secondary"
-                                 " node %s. Full abort, cleanup manually!" %
-                                 remote_node)
-
-      logger.Info("adding new mirror component on primary")
-      #HARDCODE
-      if not _CreateBlockDevOnPrimary(cfg, instance.primary_node,
-                                      instance, new_drbd,
-                                      _GetInstanceInfoText(instance)):
-        # remove secondary dev
-        cfg.SetDiskID(new_drbd, remote_node)
-        rpc.call_blockdev_remove(remote_node, new_drbd)
-        raise errors.OpExecError("Failed to create volume on primary!"
-                                 " Full abort, cleanup manually!!")
-
-      # the device exists now
-      # call the primary node to add the mirror to md
-      logger.Info("adding new mirror component to md")
-      if not rpc.call_blockdev_addchildren(instance.primary_node, dev,
-                                           [new_drbd]):
-        logger.Error("Can't add mirror compoment to md!")
-        cfg.SetDiskID(new_drbd, remote_node)
-        if not rpc.call_blockdev_remove(remote_node, new_drbd):
-          logger.Error("Can't rollback on secondary")
-        cfg.SetDiskID(new_drbd, instance.primary_node)
-        if not rpc.call_blockdev_remove(instance.primary_node, new_drbd):
-          logger.Error("Can't rollback on primary")
-        raise errors.OpExecError("Full abort, cleanup manually!!")
-
-      dev.children.append(new_drbd)
-      cfg.AddInstance(instance)
-
-    # this can fail as the old devices are degraded and _WaitForSync
-    # does a combined result over all disks, so we don't check its
-    # return value
-    _WaitForSync(cfg, instance, self.proc, unlock=True)
-
-    # so check manually all the devices
-    for name in iv_names:
-      dev, child, new_drbd = iv_names[name]
-      cfg.SetDiskID(dev, instance.primary_node)
-      is_degr = rpc.call_blockdev_find(instance.primary_node, dev)[5]
-      if is_degr:
-        raise errors.OpExecError("MD device %s is degraded!" % name)
-      cfg.SetDiskID(new_drbd, instance.primary_node)
-      is_degr = rpc.call_blockdev_find(instance.primary_node, new_drbd)[5]
-      if is_degr:
-        raise errors.OpExecError("New drbd device %s is degraded!" % name)
-
-    for name in iv_names:
-      dev, child, new_drbd = iv_names[name]
-      logger.Info("remove mirror %s component" % name)
-      cfg.SetDiskID(dev, instance.primary_node)
-      if not rpc.call_blockdev_removechildren(instance.primary_node,
-                                              dev, [child]):
-        logger.Error("Can't remove child from mirror, aborting"
-                     " *this device cleanup*.\nYou need to cleanup manually!!")
-        continue
-
-      for node in child.logical_id[:2]:
-        logger.Info("remove child device on %s" % node)
-        cfg.SetDiskID(child, node)
-        if not rpc.call_blockdev_remove(node, child):
-          logger.Error("Warning: failed to remove device from node %s,"
-                       " continuing operation." % node)
-
-      dev.children.remove(child)
-
-      cfg.AddInstance(instance)
-
   def _ExecD8DiskOnly(self, feedback_fn):
     """Replace a disk on the primary or secondary for dbrd8.
 
@@ -4225,9 +4123,7 @@
 
     """
     instance = self.instance
-    if instance.disk_template == constants.DT_REMOTE_RAID1:
-      fn = self._ExecRR1
-    elif instance.disk_template == constants.DT_DRBD8:
+    if instance.disk_template == constants.DT_DRBD8:
       if self.op.remote_node is None:
         fn = self._ExecD8DiskOnly
       else:
