Revision 4d32c211

b/lib/client/gnt_node.py
@@ -656,7 +656,7 @@
 
   """
   all_changes = [opts.master_candidate, opts.drained, opts.offline,
-                 opts.master_capable, opts.vm_capable]
+                 opts.master_capable, opts.vm_capable, opts.secondary_ip]
   if all_changes.count(None) == len(all_changes):
     ToStderr("Please give at least one of the parameters.")
     return 1
@@ -667,6 +667,7 @@
                                drained=opts.drained,
                                master_capable=opts.master_capable,
                                vm_capable=opts.vm_capable,
+                               secondary_ip=opts.secondary_ip,
                                force=opts.force,
                                auto_promote=opts.auto_promote)
 
@@ -720,7 +721,7 @@
   'modify': (
     SetNodeParams, ARGS_ONE_NODE,
     [FORCE_OPT, SUBMIT_OPT, MC_OPT, DRAINED_OPT, OFFLINE_OPT,
-     CAPAB_MASTER_OPT, CAPAB_VM_OPT,
+     CAPAB_MASTER_OPT, CAPAB_VM_OPT, SECONDARY_IP_OPT,
      AUTO_PROMOTE_OPT, DRY_RUN_OPT, PRIORITY_OPT],
     "<node_name>", "Alters the parameters of a node"),
   'powercycle': (
b/lib/cmdlib.py
@@ -650,6 +650,33 @@
     _CheckOSVariant(result.payload, os_name)
 
 
+def _CheckNodeHasSecondaryIP(lu, node, secondary_ip, prereq):
+  """Ensure that a node has the given secondary ip.
+
+  @type lu: L{LogicalUnit}
+  @param lu: the LU on behalf of which we make the check
+  @type node: string
+  @param node: the node to check
+  @type secondary_ip: string
+  @param secondary_ip: the ip to check
+  @type prereq: boolean
+  @param prereq: whether to throw a prerequisite or an execute error
+  @raise errors.OpPrereqError: if the node doesn't have the ip, and prereq=True
+  @raise errors.OpExecError: if the node doesn't have the ip, and prereq=False
+
+  """
+  result = lu.rpc.call_node_has_ip_address(node, secondary_ip)
+  result.Raise("Failure checking secondary ip on node %s" % node,
+               prereq=prereq, ecode=errors.ECODE_ENVIRON)
+  if not result.payload:
+    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
+           " please fix and re-run this command" % secondary_ip)
+    if prereq:
+      raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
+    else:
+      raise errors.OpExecError(msg)
+
+
 def _RequireFileStorage():
   """Checks that file storage is enabled.
 
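The helper above centralizes a check that the node-add path previously performed inline (that inline block is replaced in a later hunk below); its prereq flag decides whether a failure is reported as errors.OpPrereqError, raised while the operation is still being validated, or as errors.OpExecError, raised once execution has started. A minimal standalone sketch of that pattern, with stand-in exception classes rather than Ganeti's actual errors module:

class OpPrereqError(Exception):
  """Stand-in: raised while prerequisites are still being checked."""

class OpExecError(Exception):
  """Stand-in: raised once execution has started."""

def check_node_has_ip(node_has_ip, secondary_ip, prereq):
  # Raise the phase-appropriate error if the node lacks secondary_ip.
  if not node_has_ip:
    msg = ("Node claims it doesn't have the secondary ip you gave (%s),"
           " please fix and re-run this command" % secondary_ip)
    if prereq:
      raise OpPrereqError(msg)  # caller is still in the prerequisite phase
    raise OpExecError(msg)      # caller is already executing

# check_node_has_ip(False, "192.0.2.10", prereq=True)   -> OpPrereqError
# check_node_has_ip(False, "192.0.2.10", prereq=False)  -> OpExecError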
@@ -3832,7 +3859,7 @@
       if not netutils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
                            source=myself.secondary_ip):
         raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
-                                   " based ping to noded port",
+                                   " based ping to node daemon port",
                                    errors.ECODE_ENVIRON)
 
     if self.op.readd:
@@ -3904,14 +3931,8 @@
       result.Raise("Can't update hosts file with new host data")
 
     if new_node.secondary_ip != new_node.primary_ip:
-      result = self.rpc.call_node_has_ip_address(new_node.name,
-                                                 new_node.secondary_ip)
-      result.Raise("Failure checking secondary ip on node %s" % new_node.name,
-                   prereq=True, ecode=errors.ECODE_ENVIRON)
-      if not result.payload:
-        raise errors.OpExecError("Node claims it doesn't have the secondary ip"
-                                 " you gave (%s). Please fix and re-run this"
-                                 " command." % new_node.secondary_ip)
+      _CheckNodeHasSecondaryIP(self, new_node.name, new_node.secondary_ip,
+                               False)
 
     node_verify_list = [self.cfg.GetMasterNode()]
     node_verify_param = {
@@ -3968,6 +3989,7 @@
     ("auto_promote", False, ht.TBool),
     ("master_capable", None, ht.TMaybeBool),
     ("vm_capable", None, ht.TMaybeBool),
+    ("secondary_ip", None, ht.TMaybeString),
     _PForce,
     ]
   REQ_BGL = False
@@ -3984,7 +4006,8 @@
   def CheckArguments(self):
     self.op.node_name = _ExpandNodeName(self.cfg, self.op.node_name)
     all_mods = [self.op.offline, self.op.master_candidate, self.op.drained,
-                self.op.master_capable, self.op.vm_capable]
+                self.op.master_capable, self.op.vm_capable,
+                self.op.secondary_ip]
     if all_mods.count(None) == len(all_mods):
       raise errors.OpPrereqError("Please pass at least one modification",
                                  errors.ECODE_INVAL)
@@ -3999,7 +4022,14 @@
                          self.op.drained == True or
                          self.op.master_capable == False)
 
+    if self.op.secondary_ip:
+      if not netutils.IP4Address.IsValid(self.op.secondary_ip):
+        raise errors.OpPrereqError("Secondary IP (%s) needs to be a valid IPv4"
+                                   " address" % self.op.secondary_ip,
+                                   errors.ECODE_INVAL)
+
     self.lock_all = self.op.auto_promote and self.might_demote
+    self.lock_instances = self.op.secondary_ip is not None
 
   def ExpandNames(self):
     if self.lock_all:
@@ -4007,6 +4037,29 @@
     else:
       self.needed_locks = {locking.LEVEL_NODE: self.op.node_name}
 
+    if self.lock_instances:
+      self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
+
+  def DeclareLocks(self, level):
+    # If we have locked all instances, before waiting to lock nodes, release
+    # all the ones living on nodes unrelated to the current operation.
+    if level == locking.LEVEL_NODE and self.lock_instances:
+      instances_release = []
+      instances_keep = []
+      self.affected_instances = []
+      if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
+        for instance_name in self.acquired_locks[locking.LEVEL_INSTANCE]:
+          instance = self.context.cfg.GetInstanceInfo(instance_name)
+          i_mirrored = instance.disk_template in constants.DTS_NET_MIRROR
+          if i_mirrored and self.op.node_name in instance.all_nodes:
+            instances_keep.append(instance_name)
+            self.affected_instances.append(instance)
+          else:
+            instances_release.append(instance_name)
+        if instances_release:
+          self.context.glm.release(locking.LEVEL_INSTANCE, instances_release)
+          self.acquired_locks[locking.LEVEL_INSTANCE] = instances_keep
+
   def BuildHooksEnv(self):
     """Build hooks env.
 
@@ -4121,6 +4174,35 @@
                         " without using re-add. Please make sure the node"
                         " is healthy!")
 
+    if self.op.secondary_ip:
+      # Ok even without locking, because this can't be changed by any LU
+      master = self.cfg.GetNodeInfo(self.cfg.GetMasterNode())
+      master_singlehomed = master.secondary_ip == master.primary_ip
+      if master_singlehomed and self.op.secondary_ip:
+        raise errors.OpPrereqError("Cannot change the secondary ip on a single"
+                                   " homed cluster", errors.ECODE_INVAL)
+
+      if node.offline:
+        if self.affected_instances:
+          raise errors.OpPrereqError("Cannot change secondary ip: offline"
+                                     " node has instances (%s) configured"
+                                     " to use it" % self.affected_instances)
+      else:
+        # On online nodes, check that no instances are running, and that
+        # the node has the new ip and we can reach it.
+        for instance in self.affected_instances:
+          _CheckInstanceDown(self, instance, "cannot change secondary ip")
+
+        _CheckNodeHasSecondaryIP(self, node.name, self.op.secondary_ip, True)
+        if master.name != node.name:
+          # check reachability from master secondary ip to new secondary ip
+          if not netutils.TcpPing(self.op.secondary_ip,
+                                  constants.DEFAULT_NODED_PORT,
+                                  source=master.secondary_ip):
+            raise errors.OpPrereqError("Node secondary ip not reachable by TCP"
+                                       " based ping to node daemon port",
+                                       errors.ECODE_ENVIRON)
+
   def Exec(self, feedback_fn):
     """Modifies a node.
 
@@ -4154,6 +4236,10 @@
       if self.lock_all:
         _AdjustCandidatePool(self, [node.name])
 
+    if self.op.secondary_ip:
+      node.secondary_ip = self.op.secondary_ip
+      result.append(("secondary_ip", self.op.secondary_ip))
+
     # this will trigger configuration file update, if needed
     self.cfg.Update(node, feedback_fn)
 
b/lib/opcodes.py
@@ -437,6 +437,7 @@
     "auto_promote",
     "master_capable",
     "vm_capable",
+    "secondary_ip",
     ]
 
 
b/man/gnt-node.sgml
@@ -637,6 +637,7 @@
         <arg>--offline=<option>yes|no</option></arg>
         <arg>--master-capable=<option>yes|no</option></arg>
         <arg>--vm-capable=<option>yes|no</option></arg>
+        <arg>-s <replaceable>secondary_ip</replaceable></arg>
         <arg>--auto-promote</arg>
         <arg choice="req"><replaceable>node</replaceable></arg>
       </cmdsynopsis>
@@ -671,6 +672,12 @@
         </screen>
       </para>
 
+      <para>
+        The <option>-s</option> option can be used to change the node's
+        secondary ip. No drbd instances can be running on the node while this
+        operation is taking place.
+      </para>
+
       <para>Example (setting the node back to online and master candidate):
         <screen>
 # gnt-node modify --offline=no --master-candidate=yes node1.example.com
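An invocation of the new option might look like the following (hypothetical node name and address; per the checks above, the value must be a valid IPv4 address and no DRBD instances may be running on the node):

# gnt-node modify -s 192.0.2.20 node2.example.com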
