Revision 077114cd lib/cmdlib.py

--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -3904,6 +3904,7 @@
     ("drained", None, ht.TMaybeBool),
     ("auto_promote", False, ht.TBool),
     ("master_capable", None, ht.TMaybeBool),
+    ("vm_capable", None, ht.TMaybeBool),
     _PForce,
     ]
   REQ_BGL = False
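The new opcode parameter follows the existing (name, default, check) pattern: a default of None means "not given", and ht.TMaybeBool accepts either None or a boolean. A minimal standalone sketch of such a validator, assuming the same None-or-bool semantics (illustrative only, not Ganeti's actual ht module):

def t_maybe_bool(value):
  # Accept "not specified" (None) or an explicit boolean; reject anything else
  return value is None or isinstance(value, bool)

assert t_maybe_bool(None)        # parameter omitted
assert t_maybe_bool(False)       # explicit request to clear the flag
assert not t_maybe_bool("no")    # strings must be converted before this point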
@@ -3920,7 +3921,7 @@
   def CheckArguments(self):
     self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
     all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
-                self.op.master_capable]
+                self.op.master_capable, self.op.vm_capable]
     if all_mods.count(None) == len(all_mods):
       raise errors.OpPrereqError("Please pass at least one modification",
                                  errors.ECODE_INVAL)
@@ -3955,6 +3956,7 @@
       "OFFLINE": str(self.op.offline),
       "DRAINED": str(self.op.drained),
       "MASTER_CAPABLE": str(self.op.master_capable),
+      "VM_CAPABLE": str(self.op.vm_capable),
       }
     nl = [self.cfg.GetMasterNode(),
           self.op.node_name]
@@ -3982,6 +3984,13 @@
                                  " it a master candidate" % node.name,
                                  errors.ECODE_STATE)

+    if self.op.vm_capable == False:
+      (ipri, isec) = self.cfg.GetNodeInstances(self.op.node_name)
+      if ipri or isec:
+        raise errors.OpPrereqError("Node %s hosts instances, cannot unset"
+                                   " the vm_capable flag" % node.name,
+                                   errors.ECODE_STATE)
+
     if node.master_candidate and self.might_demote and not self.lock_all:
       assert not self.op.auto_promote, "auto-promote set but lock_all not"
       # check if after removing the current node, we're missing master
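The new prerequisite check refuses to clear vm_capable while the node still hosts instances; cfg.GetNodeInstances is expected to return the node's primary and secondary instance lists. A self-contained sketch of the same guard, with a stand-in callable instead of the real configuration object (the names and sample data below are hypothetical):

def check_vm_capable_unset(node_name, get_node_instances):
  # get_node_instances returns (primary_instances, secondary_instances)
  ipri, isec = get_node_instances(node_name)
  if ipri or isec:
    raise ValueError("Node %s hosts instances, cannot unset"
                     " the vm_capable flag" % node_name)

# Example: a node with one primary instance is rejected
try:
  check_vm_capable_unset("node1.example.com", lambda name: (["inst1"], []))
except ValueError as err:
  print(err)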
@@ -4043,34 +4052,36 @@
       new_role = old_role

     result = []
-    changed_mc = [old_role, new_role].count(self._ROLE_CANDIDATE) == 1

-    if self.op.master_capable is not None:
-      node.master_capable = self.op.master_capable
-      result.append(("master_capable", str(self.op.master_capable)))
+    for attr in ["master_capable", "vm_capable"]:
+      val = getattr(self.op, attr)
+      if val is not None:
+        setattr(node, attr, val)
+        result.append((attr, str(val)))

-    # Tell the node to demote itself, if no longer MC and not offline
-    if (old_role == self._ROLE_CANDIDATE and
-        new_role != self._ROLE_OFFLINE and new_role != old_role):
-      msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
-      if msg:
-        self.LogWarning("Node failed to demote itself: %s", msg)
+    if new_role != old_role:
+      # Tell the node to demote itself, if no longer MC and not offline
+      if old_role == self._ROLE_CANDIDATE and new_role != self._ROLE_OFFLINE:
+        msg = self.rpc.call_node_demote_from_mc(node.name).fail_msg
+        if msg:
+          self.LogWarning("Node failed to demote itself: %s", msg)

-    new_flags = self._R2F[new_role]
-    for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
-      if of != nf:
-        result.append((desc, str(nf)))
-    (node.master_candidate, node.drained, node.offline) = new_flags
+      new_flags = self._R2F[new_role]
+      for of, nf, desc in zip(self.old_flags, new_flags, self._FLAGS):
+        if of != nf:
+          result.append((desc, str(nf)))
+      (node.master_candidate, node.drained, node.offline) = new_flags

-    # we locked all nodes, we adjust the CP before updating this node
-    if self.lock_all:
-      _AdjustCandidatePool(self, [node.name])
+      # we locked all nodes, we adjust the CP before updating this node
+      if self.lock_all:
+        _AdjustCandidatePool(self, [node.name])

     # this will trigger configuration file update, if needed
     self.cfg.Update(node, feedback_fn)

-    # this will trigger job queue propagation or cleanup
-    if changed_mc:
+    # this will trigger job queue propagation or cleanup if the mc
+    # flag changed
+    if [old_role, new_role].count(self._ROLE_CANDIDATE) == 1:
       self.context.ReaddNode(node)

     return result
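The per-flag assignments are folded into one getattr/setattr loop over the settable attributes, and the role-dependent bookkeeping (demotion RPC, flag recomputation, candidate-pool adjustment) now runs only when the role actually changed. A generic sketch of the loop pattern in isolation (the SimpleNS class and sample values are illustrative stand-ins, not Ganeti objects):

class SimpleNS(object):
  # Tiny attribute container standing in for the opcode and node objects
  def __init__(self, **kwargs):
    self.__dict__.update(kwargs)

def apply_flags(op, node, attrs=("master_capable", "vm_capable")):
  result = []
  for attr in attrs:
    val = getattr(op, attr)
    if val is not None:          # None means "leave this flag unchanged"
      setattr(node, attr, val)
      result.append((attr, str(val)))
  return result

op = SimpleNS(master_capable=None, vm_capable=False)
node = SimpleNS(master_capable=True, vm_capable=True)
print(apply_flags(op, node))     # -> [('vm_capable', 'False')]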
