Revision 2c2f257d

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -1704,7 +1704,8 @@
     self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
 
     # Get instances in node group; this is unsafe and needs verification later
-    inst_names = self.cfg.GetNodeGroupInstances(self.group_uuid)
+    inst_names = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
 
     self.needed_locks = {
       locking.LEVEL_INSTANCE: inst_names,
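Note: this hunk (and the matching one in the next hunk) narrows the pre-acquired instance locks from all instances touching the group to only those whose primary node is in it. A minimal sketch of that selection over plain dicts; node_to_group, instances and group_instances are made-up stand-ins for illustration, not Ganeti's config API:

# Illustrative only: plain-dict stand-ins for the cluster configuration.
node_to_group = {"node1": "group-A", "node2": "group-B"}
instances = {
    "inst1": {"primary": "node1", "secondaries": ["node2"]},
    "inst2": {"primary": "node2", "secondaries": ["node1"]},
}

def group_instances(group_uuid, primary_only=False):
    """Return instance names touching the group.

    With primary_only=True only instances whose *primary* node is in the
    group are returned, mirroring the narrower lock set taken above.
    """
    result = []
    for name, inst in instances.items():
        nodes = [inst["primary"]]
        if not primary_only:
            nodes += inst["secondaries"]
        if any(node_to_group[n] == group_uuid for n in nodes):
            result.append(name)
    return result

print(group_instances("group-A"))                     # ['inst1', 'inst2']
print(group_instances("group-A", primary_only=True))  # ['inst1']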
@@ -1738,7 +1739,8 @@
     self.group_info = self.cfg.GetNodeGroup(self.group_uuid)
 
     group_nodes = set(self.group_info.members)
-    group_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
+    group_instances = \
+      self.cfg.GetNodeGroupInstances(self.group_uuid, primary_only=True)
 
     unlocked_nodes = \
         group_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
@@ -1748,11 +1750,13 @@
 
     if unlocked_nodes:
       raise errors.OpPrereqError("Missing lock for nodes: %s" %
-                                 utils.CommaJoin(unlocked_nodes))
+                                 utils.CommaJoin(unlocked_nodes),
+                                 errors.ECODE_STATE)
 
     if unlocked_instances:
       raise errors.OpPrereqError("Missing lock for instances: %s" %
-                                 utils.CommaJoin(unlocked_instances))
+                                 utils.CommaJoin(unlocked_instances),
+                                 errors.ECODE_STATE)
 
     self.all_node_info = self.cfg.GetAllNodesInfo()
     self.all_inst_info = self.cfg.GetAllInstancesInfo()
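The hunk above attaches an error-code classification, errors.ECODE_STATE, to the prerequisite failures instead of raising with the message alone. A rough stand-alone sketch of that (message, errcode) calling convention, with stubs in place of Ganeti's errors module and utils.CommaJoin:

ECODE_STATE = "wrong_state"        # stub for errors.ECODE_STATE

class OpPrereqError(Exception):
    """Stub mirroring the (message, errcode) call pattern shown above."""

def comma_join(names):
    """Stand-in for utils.CommaJoin."""
    return ", ".join(sorted(names))

group_nodes = {"node1", "node2", "node3"}
owned_node_locks = {"node1"}
unlocked_nodes = group_nodes - owned_node_locks

try:
    if unlocked_nodes:
        raise OpPrereqError("Missing lock for nodes: %s" %
                            comma_join(unlocked_nodes), ECODE_STATE)
except OpPrereqError as err:
    print(err.args)  # ('Missing lock for nodes: node2, node3', 'wrong_state')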
@@ -1772,17 +1776,17 @@
 
     for inst in self.my_inst_info.values():
       if inst.disk_template in constants.DTS_INT_MIRROR:
-        group = self.my_node_info[inst.primary_node].group
-        for nname in inst.secondary_nodes:
-          if self.all_node_info[nname].group != group:
+        for nname in inst.all_nodes:
+          if self.all_node_info[nname].group != self.group_uuid:
             extra_lv_nodes.add(nname)
 
     unlocked_lv_nodes = \
         extra_lv_nodes.difference(self.owned_locks(locking.LEVEL_NODE))
 
     if unlocked_lv_nodes:
-      raise errors.OpPrereqError("these nodes could be locked: %s" %
-                                 utils.CommaJoin(unlocked_lv_nodes))
+      raise errors.OpPrereqError("Missing node locks for LV check: %s" %
+                                 utils.CommaJoin(unlocked_lv_nodes),
+                                 errors.ECODE_STATE)
     self.extra_lv_nodes = list(extra_lv_nodes)
 
   def _VerifyNode(self, ninfo, nresult):
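The rewritten loop above walks inst.all_nodes and compares each node's group against self.group_uuid directly, instead of comparing only secondaries against the primary node's group. An illustrative version over plain data; the node names and dicts are invented for the example:

group_uuid = "group-A"
node_group = {"node1": "group-A", "node2": "group-A", "node3": "group-B"}

# Mirrored instances whose primary node is in group-A, each listed with its
# full node set (primary plus secondaries), roughly what inst.all_nodes gives.
mirrored_instances = {
    "inst1": ["node1", "node3"],   # secondary sits in another group
    "inst2": ["node1", "node2"],   # entirely inside group-A
}

extra_lv_nodes = set()
for nodes in mirrored_instances.values():
    for nname in nodes:
        if node_group[nname] != group_uuid:
            extra_lv_nodes.add(nname)

print(sorted(extra_lv_nodes))  # ['node3'] -> needs an extra node lock for the LV checks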
@@ -2052,7 +2056,8 @@
 
     """
     for node, n_img in node_image.items():
-      if n_img.offline or n_img.rpc_fail or n_img.lvm_fail:
+      if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
+          self.all_node_info[node].group != self.group_uuid):
         # skip non-healthy nodes
         continue
       for volume in n_img.volumes:
@@ -2079,11 +2084,11 @@
       # WARNING: we currently take into account down instances as well
       # as up ones, considering that even if they're down someone
       # might want to start them even in the event of a node failure.
-      if n_img.offline:
-        # we're skipping offline nodes from the N+1 warning, since
-        # most likely we don't have good memory infromation from them;
-        # we already list instances living on such nodes, and that's
-        # enough warning
+      if n_img.offline or self.all_node_info[node].group != self.group_uuid:
+        # we're skipping nodes marked offline and nodes in other groups from
+        # the N+1 warning, since most likely we don't have good memory
+        # infromation from them; we already list instances living on such
+        # nodes, and that's enough warning
         continue
       for prinode, instances in n_img.sbp.items():
         needed_mem = 0
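The last two hunks add the same guard twice: the per-node checks (orphaned volumes, N+1 memory) now also skip nodes that belong to a group other than the one being verified. A compact sketch of that filter, with a namedtuple standing in for the node-image objects used above:

from collections import namedtuple

# Reduced stand-in for the per-node status object.
NodeImage = namedtuple("NodeImage", "offline rpc_fail lvm_fail")

group_uuid = "group-A"
node_group = {"node1": "group-A", "node2": "group-B", "node3": "group-A"}
node_image = {
    "node1": NodeImage(False, False, False),   # healthy, in the group
    "node2": NodeImage(False, False, False),   # healthy, but wrong group
    "node3": NodeImage(True, False, False),    # offline
}

for node, n_img in node_image.items():
    if (n_img.offline or n_img.rpc_fail or n_img.lvm_fail or
            node_group[node] != group_uuid):
        continue  # skip unhealthy nodes and nodes outside the verified group
    print("would run volume / N+1 checks on", node)   # only node1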
