Revision 62bfbc7d

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -4500,7 +4500,7 @@
   return not cumul_degraded


-def _CheckDiskConsistency(lu, dev, node, on_primary, ldisk=False):
+def _CheckDiskConsistency(lu, instance, dev, node, on_primary, ldisk=False):
   """Check that mirrors are not degraded.

   The ldisk parameter, if True, will change the test from the
@@ -4529,7 +4529,8 @@

   if dev.children:
     for child in dev.children:
-      result = result and _CheckDiskConsistency(lu, child, node, on_primary)
+      result = result and _CheckDiskConsistency(lu, instance, child, node,
+                                                on_primary)

   return result

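Note: _CheckDiskConsistency now receives the owning instance as its second argument and threads it down to every child device, presumably so per-instance information travels with each disk that is handed to the node RPC layer. A minimal sketch of that shape, with a hypothetical query_device_state() standing in for the real RPC-backed check:

    def query_device_state(lu, instance, dev, node, on_primary, ldisk=False):
      # Hypothetical placeholder for the real per-device node query.
      return True

    def check_disk_consistency(lu, instance, dev, node, on_primary, ldisk=False):
      result = query_device_state(lu, instance, dev, node, on_primary, ldisk)
      if dev.children:
        for child in dev.children:
          # the same owning instance is passed down for every child disk
          result = result and check_disk_consistency(lu, instance, child, node,
                                                     on_primary)
      return result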
@@ -6315,7 +6316,8 @@
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, False, idx)
+      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
+                                             False, idx)
       msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
@@ -6337,7 +6339,8 @@
         node_disk = node_disk.Copy()
         node_disk.UnsetSize()
       lu.cfg.SetDiskID(node_disk, node)
-      result = lu.rpc.call_blockdev_assemble(node, node_disk, iname, True, idx)
+      result = lu.rpc.call_blockdev_assemble(node, (node_disk, instance), iname,
+                                             True, idx)
       msg = result.fail_msg
       if msg:
         lu.proc.LogWarning("Could not prepare block device %s on node %s"
@@ -7732,7 +7735,7 @@
     # activate, get path, copy the data over
     for idx, disk in enumerate(instance.disks):
       self.LogInfo("Copying data for disk %d", idx)
-      result = self.rpc.call_blockdev_assemble(target_node, disk,
+      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                                instance.name, True, idx)
       if result.fail_msg:
         self.LogWarning("Can't assemble newly created disk %d: %s",
@@ -7740,7 +7743,7 @@
         errs.append(result.fail_msg)
         break
       dev_path = result.payload
-      result = self.rpc.call_blockdev_export(source_node, disk,
+      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                              target_node, dev_path,
                                              cluster_name)
       if result.fail_msg:
@@ -8103,7 +8106,8 @@
       all_done = True
       result = self.rpc.call_drbd_wait_sync(self.all_nodes,
                                             self.nodes_ip,
-                                            self.instance.disks)
+                                            (self.instance.disks,
+                                             self.instance))
       min_percent = 100
       for node, nres in result.items():
         nres.Raise("Cannot resync disks on node %s" % node)
@@ -8149,7 +8153,7 @@
       msg = "single-master"
     self.feedback_fn("* changing disks into %s mode" % msg)
     result = self.rpc.call_drbd_attach_net(self.all_nodes, self.nodes_ip,
-                                           self.instance.disks,
+                                           (self.instance.disks, self.instance),
                                            self.instance.name, multimaster)
     for node, nres in result.items():
       nres.Raise("Cannot change disks config on node %s" % node)
@@ -8301,7 +8305,7 @@

     self.feedback_fn("* checking disk consistency between source and target")
     for (idx, dev) in enumerate(instance.disks):
-      if not _CheckDiskConsistency(self.lu, dev, target_node, False):
+      if not _CheckDiskConsistency(self.lu, instance, dev, target_node, False):
         raise errors.OpExecError("Disk %s is degraded or not fully"
                                  " synchronized on target node,"
                                  " aborting migration" % idx)
@@ -8464,7 +8468,8 @@
       self.feedback_fn("* checking disk consistency between source and target")
       for (idx, dev) in enumerate(instance.disks):
         # for drbd, these are drbd over lvm
-        if not _CheckDiskConsistency(self.lu, dev, target_node, False):
+        if not _CheckDiskConsistency(self.lu, instance, dev, target_node,
+                                     False):
           if primary_node.offline:
             self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                              " target node %s" %
@@ -8810,7 +8815,9 @@
     lu.cfg.SetDiskID(device, node)

   logging.info("Pause sync of instance %s disks", instance.name)
-  result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, True)
+  result = lu.rpc.call_blockdev_pause_resume_sync(node,
+                                                  (instance.disks, instance),
+                                                  True)

   for idx, success in enumerate(result.payload):
     if not success:
@@ -8840,7 +8847,8 @@
         wipe_size = min(wipe_chunk_size, size - offset)
         logging.debug("Wiping disk %d, offset %s, chunk %s",
                       idx, offset, wipe_size)
-        result = lu.rpc.call_blockdev_wipe(node, device, offset, wipe_size)
+        result = lu.rpc.call_blockdev_wipe(node, (device, instance), offset,
+                                           wipe_size)
         result.Raise("Could not wipe disk %d at offset %d for size %d" %
                      (idx, offset, wipe_size))
         now = time.time()
@@ -8853,7 +8861,9 @@
   finally:
     logging.info("Resume sync of instance %s disks", instance.name)

-    result = lu.rpc.call_blockdev_pause_resume_sync(node, instance.disks, False)
+    result = lu.rpc.call_blockdev_pause_resume_sync(node,
+                                                    (instance.disks, instance),
+                                                    False)

     for idx, success in enumerate(result.payload):
       if not success:
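The wrappers that act on all of an instance's disks at once follow the same convention, pairing the whole disk list with the instance in a single tuple rather than annotating each disk separately. An illustrative fragment built from the names in the hunks above (lu, node and instance are assumed to exist as they do in cmdlib.py, and logging is the standard module):

    # pause sync of every disk of the instance, then inspect per-disk results
    result = lu.rpc.call_blockdev_pause_resume_sync(node,
                                                    (instance.disks, instance),
                                                    True)
    for idx, success in enumerate(result.payload):
      if not success:
        logging.warn("pause-sync of disk %d failed", idx)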
@@ -10072,7 +10082,8 @@
           if pause_sync:
             feedback_fn("* pausing disk sync to install instance OS")
             result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
-                                                              iobj.disks, True)
+                                                              (iobj.disks,
+                                                               iobj), True)
             for idx, success in enumerate(result.payload):
               if not success:
                 logging.warn("pause-sync of instance %s for disk %d failed",
@@ -10086,7 +10097,8 @@
           if pause_sync:
             feedback_fn("* resuming disk sync")
             result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
-                                                              iobj.disks, False)
+                                                              (iobj.disks,
+                                                               iobj), False)
             for idx, success in enumerate(result.payload):
               if not success:
                 logging.warn("resume-sync of instance %s for disk %d failed",
@@ -10766,8 +10778,8 @@
       self.lu.LogInfo("Checking disk/%d consistency on node %s" %
                       (idx, node_name))

-      if not _CheckDiskConsistency(self.lu, dev, node_name, on_primary,
-                                   ldisk=ldisk):
+      if not _CheckDiskConsistency(self.lu, self.instance, dev, node_name,
+                                   on_primary, ldisk=ldisk):
         raise errors.OpExecError("Node %s has degraded storage, unsafe to"
                                  " replace disks for instance %s" %
                                  (node_name, self.instance.name))
@@ -10937,8 +10949,9 @@

       # Now that the new lvs have the old name, we can add them to the device
       self.lu.LogInfo("Adding new mirror component on %s" % self.target_node)
-      result = self.rpc.call_blockdev_addchildren(self.target_node, dev,
-                                                  new_lvs)
+      result = self.rpc.call_blockdev_addchildren(self.target_node,
+                                                  (dev, self.instance),
+                                                  (new_lvs, self.instance))
       msg = result.fail_msg
       if msg:
         for new_lv in new_lvs:
@@ -11109,7 +11122,7 @@
     result = self.rpc.call_drbd_attach_net([self.instance.primary_node,
                                             self.new_node],
                                            self.node_secondary_ip,
-                                           self.instance.disks,
+                                           (self.instance.disks, self.instance),
                                            self.instance.name,
                                            False)
     for to_node, to_result in result.items():
@@ -11581,14 +11594,16 @@
     # First run all grow ops in dry-run mode
    for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
-      result = self.rpc.call_blockdev_grow(node, disk, self.delta, True)
+      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
+                                           True)
       result.Raise("Grow request failed to node %s" % node)

     # We know that (as far as we can test) operations across different
     # nodes will succeed, time to run it for real
     for node in instance.all_nodes:
       self.cfg.SetDiskID(disk, node)
-      result = self.rpc.call_blockdev_grow(node, disk, self.delta, False)
+      result = self.rpc.call_blockdev_grow(node, (disk, instance), self.delta,
+                                           False)
       result.Raise("Grow request failed to node %s" % node)

       # TODO: Rewrite code to work properly
@@ -11696,7 +11711,7 @@

     self.wanted_instances = instances.values()

-  def _ComputeBlockdevStatus(self, node, instance_name, dev):
+  def _ComputeBlockdevStatus(self, node, instance, dev):
     """Returns the status of a block device

     """
@@ -11709,7 +11724,7 @@
     if result.offline:
       return None

-    result.Raise("Can't compute disk status for %s" % instance_name)
+    result.Raise("Can't compute disk status for %s" % instance.name)

     status = result.payload
     if status is None:
@@ -11731,8 +11746,8 @@
         snode = dev.logical_id[0]

     dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
-                                              instance.name, dev)
-    dev_sstatus = self._ComputeBlockdevStatus(snode, instance.name, dev)
+                                              instance, dev)
+    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)

     if dev.children:
       dev_children = map(compat.partial(self._ComputeDiskStatus,
--- a/lib/masterd/instance.py
+++ b/lib/masterd/instance.py
@@ -1164,7 +1164,7 @@

       # result.payload will be a snapshot of an lvm leaf of the one we
       # passed
-      result = self._lu.rpc.call_blockdev_snapshot(src_node, disk)
+      result = self._lu.rpc.call_blockdev_snapshot(src_node, (disk, instance))
       new_dev = False
       msg = result.fail_msg
       if msg:
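Taken together, the pattern across both files is that every blockdev/drbd RPC wrapper that previously received a bare disk object, or a bare list of disks, now receives it paired with the owning instance. Schematically (argument names abbreviated from the hunks above; this is a summary sketch, not additional changed code):

    # before
    rpc.call_blockdev_grow(node, disk, delta, True)
    rpc.call_drbd_wait_sync(all_nodes, nodes_ip, instance.disks)
    # after: each disk argument is paired with its owning instance
    rpc.call_blockdev_grow(node, (disk, instance), delta, True)
    rpc.call_drbd_wait_sync(all_nodes, nodes_ip, (instance.disks, instance))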
