Revision ae1a845c lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -2867,11 +2867,109 @@
   REQ_BGL = False
 
   def ExpandNames(self):
+    self.share_locks = dict.fromkeys(locking.LEVELS, 1)
     self.needed_locks = {
-      locking.LEVEL_NODE: locking.ALL_SET,
-      locking.LEVEL_INSTANCE: locking.ALL_SET,
-    }
+      locking.LEVEL_NODEGROUP: locking.ALL_SET,
+      }
+
+  def Exec(self, feedback_fn):
+    group_names = self.glm.list_owned(locking.LEVEL_NODEGROUP)
+
+    # Submit one instance of L{opcodes.OpGroupVerifyDisks} per node group
+    return ResultWithJobs([[opcodes.OpGroupVerifyDisks(group_name=group)]
+                           for group in group_names])
+
+
+class LUGroupVerifyDisks(NoHooksLU):
+  """Verifies the status of all disks in a node group.
+
+  """
+  REQ_BGL = False
+
+  def ExpandNames(self):
+    # Raises errors.OpPrereqError on its own if group can't be found
+    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
+
     self.share_locks = dict.fromkeys(locking.LEVELS, 1)
+    self.needed_locks = {
+      locking.LEVEL_INSTANCE: [],
+      locking.LEVEL_NODEGROUP: [],
+      locking.LEVEL_NODE: [],
+      }
+
+  def DeclareLocks(self, level):
+    if level == locking.LEVEL_INSTANCE:
+      assert not self.needed_locks[locking.LEVEL_INSTANCE]
+
+      # Lock instances optimistically, needs verification once node and group
+      # locks have been acquired
+      self.needed_locks[locking.LEVEL_INSTANCE] = \
+        self.cfg.GetNodeGroupInstances(self.group_uuid)
+
+    elif level == locking.LEVEL_NODEGROUP:
+      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
+
+      self.needed_locks[locking.LEVEL_NODEGROUP] = \
+        set([self.group_uuid] +
+            # Lock all groups used by instances optimistically; this requires
+            # going via the node before it's locked, requiring verification
+            # later on
+            [group_uuid
+             for instance_name in
+               self.glm.list_owned(locking.LEVEL_INSTANCE)
+             for group_uuid in
+               self.cfg.GetInstanceNodeGroups(instance_name)])
+
+    elif level == locking.LEVEL_NODE:
+      # This will only lock the nodes in the group to be verified which contain
+      # actual instances
+      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
+      self._LockInstancesNodes()
+
+      # Lock all nodes in group to be verified
+      assert self.group_uuid in self.glm.list_owned(locking.LEVEL_NODEGROUP)
+      member_nodes = self.cfg.GetNodeGroup(self.group_uuid).members
+      self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
+
+  def CheckPrereq(self):
+    owned_instances = frozenset(self.glm.list_owned(locking.LEVEL_INSTANCE))
+    owned_groups = frozenset(self.glm.list_owned(locking.LEVEL_NODEGROUP))
+    owned_nodes = frozenset(self.glm.list_owned(locking.LEVEL_NODE))
+
+    assert self.group_uuid in owned_groups
+
+    # Check if locked instances are still correct
+    wanted_instances = self.cfg.GetNodeGroupInstances(self.group_uuid)
+    if owned_instances != wanted_instances:
+      raise errors.OpPrereqError("Instances in node group %s changed since"
+                                 " locks were acquired, wanted %s, have %s;"
+                                 " retry the operation" %
+                                 (self.op.group_name,
+                                  utils.CommaJoin(wanted_instances),
+                                  utils.CommaJoin(owned_instances)),
+                                 errors.ECODE_STATE)
+
+    # Get instance information
+    self.instances = dict((name, self.cfg.GetInstanceInfo(name))
+                          for name in owned_instances)
+
+    # Check if node groups for locked instances are still correct
+    for (instance_name, inst) in self.instances.items():
+      assert self.group_uuid in self.cfg.GetInstanceNodeGroups(instance_name), \
+        "Instance %s has no node in group %s" % (instance_name, self.group_uuid)
+      assert owned_nodes.issuperset(inst.all_nodes), \
+        "Instance %s's nodes changed while we kept the lock" % instance_name
+
+      inst_groups = self.cfg.GetInstanceNodeGroups(instance_name)
+      if not owned_groups.issuperset(inst_groups):
+        raise errors.OpPrereqError("Instance %s's node groups changed since"
+                                   " locks were acquired, current groups are"
+                                   " '%s', owning groups '%s'; retry the"
+                                   " operation" %
+                                   (instance_name,
+                                    utils.CommaJoin(inst_groups),
+                                    utils.CommaJoin(owned_groups)),
+                                   errors.ECODE_STATE)
 
   def Exec(self, feedback_fn):
     """Verify integrity of cluster disks.
@@ -2882,50 +2980,41 @@
         missing volumes
 
     """
-    result = res_nodes, res_instances, res_missing = {}, [], {}
+    res_nodes = {}
+    res_instances = set()
+    res_missing = {}
 
-    nodes = utils.NiceSort(self.cfg.GetVmCapableNodeList())
-    instances = self.cfg.GetAllInstancesInfo().values()
+    nv_dict = _MapInstanceDisksToNodes([inst
+                                        for inst in self.instances.values()
+                                        if inst.admin_up])
 
-    nv_dict = {}
-    for inst in instances:
-      inst_lvs = {}
-      if not inst.admin_up:
-        continue
-      inst.MapLVsByNode(inst_lvs)
-      # transform { iname: {node: [vol,],},} to {(node, vol): iname}
-      for node, vol_list in inst_lvs.iteritems():
-        for vol in vol_list:
-          nv_dict[(node, vol)] = inst
-
-    if not nv_dict:
-      return result
-
-    node_lvs = self.rpc.call_lv_list(nodes, [])
-    for node, node_res in node_lvs.items():
-      if node_res.offline:
-        continue
-      msg = node_res.fail_msg
-      if msg:
-        logging.warning("Error enumerating LVs on node %s: %s", node, msg)
-        res_nodes[node] = msg
-        continue
+    if nv_dict:
+      nodes = utils.NiceSort(set(self.glm.list_owned(locking.LEVEL_NODE)) &
+                             set(self.cfg.GetVmCapableNodeList()))
 
-      lvs = node_res.payload
-      for lv_name, (_, _, lv_online) in lvs.items():
-        inst = nv_dict.pop((node, lv_name), None)
-        if (not lv_online and inst is not None
-            and inst.name not in res_instances):
-          res_instances.append(inst.name)
+      node_lvs = self.rpc.call_lv_list(nodes, [])
 
-    # any leftover items in nv_dict are missing LVs, let's arrange the
-    # data better
-    for key, inst in nv_dict.iteritems():
-      if inst.name not in res_missing:
-        res_missing[inst.name] = []
-      res_missing[inst.name].append(key)
+      for (node, node_res) in node_lvs.items():
+        if node_res.offline:
+          continue
 
-    return result
+        msg = node_res.fail_msg
+        if msg:
+          logging.warning("Error enumerating LVs on node %s: %s", node, msg)
+          res_nodes[node] = msg
+          continue
+
+        for lv_name, (_, _, lv_online) in node_res.payload.items():
+          inst = nv_dict.pop((node, lv_name), None)
+          if not (lv_online or inst is None):
+            res_instances.add(inst)
+
+      # any leftover items in nv_dict are missing LVs, let's arrange the data
+      # better
+      for key, inst in nv_dict.iteritems():
+        res_missing.setdefault(inst, []).append(key)
+
+    return (res_nodes, list(res_instances), res_missing)
 
 
 class LUClusterRepairDiskSizes(NoHooksLU):
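
The rewritten Exec relies on a _MapInstanceDisksToNodes helper that is defined elsewhere in this revision and not shown in this excerpt. Judging by the loop it replaces (whose comment read "transform { iname: {node: [vol,],},} to {(node, vol): iname}"), it builds a dictionary keyed by (node, volume) pairs, so that every LV a node reports can be popped off and whatever is left over counts as a missing volume. The following is only a rough sketch of such a mapping, written against plain dicts rather than Ganeti's objects.Instance; the real helper's exact signature and return values may differ.

def map_instance_disks_to_nodes(instances):
  # `instances` is assumed to be an iterable of (instance_name, lvs_by_node)
  # pairs, where lvs_by_node maps a node name to the list of volume names the
  # instance keeps on that node (roughly what Instance.MapLVsByNode yields).
  return dict(((node, vol), instance_name)
              for (instance_name, lvs_by_node) in instances
              for (node, vols) in lvs_by_node.items()
              for vol in vols)

# Illustration with invented names: one instance mirrored on two nodes and
# one plain instance.
nv_dict = map_instance_disks_to_nodes([
  ("inst1.example.com", {"node1": ["xenvg/disk0"], "node2": ["xenvg/disk0"]}),
  ("inst2.example.com", {"node1": ["xenvg/disk1"]}),
])
assert nv_dict[("node2", "xenvg/disk0")] == "inst1.example.com"

Whatever its exact shape, the property Exec depends on is that nv_dict.pop((node, lv_name), None) identifies the instance owning each volume a node reports, or returns None for volumes Ganeti does not track.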
