Revision a9f33339

b/lib/backend.py
@@ -929,7 +929,7 @@
     result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
 
 
-def VerifyNode(what, cluster_name, all_hvparams):
+def VerifyNode(what, cluster_name, all_hvparams, node_groups, groups_cfg):
   """Verify the status of the local node.
 
   Based on the input L{what} parameter, various checks are done on the
@@ -957,6 +957,11 @@
   @param cluster_name: the cluster's name
   @type all_hvparams: dict of dict of strings
   @param all_hvparams: a dictionary mapping hypervisor names to hvparams
+  @type node_groups: a dict of strings
+  @param node_groups: node _names_ mapped to their group uuids (it's enough to
+      have only those nodes that are in `what["nodelist"]`)
+  @type groups_cfg: a dict of dict of strings
+  @param groups_cfg: a dictionary mapping group uuids to their configuration
   @rtype: dict
   @return: a dictionary with the same keys as the input dict, and
       values representing the result of the checks
@@ -992,7 +997,12 @@
     # Try to contact all nodes
     val = {}
     for node in nodes:
-      success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
+      params = groups_cfg.get(node_groups.get(node))
+      ssh_port = params["ndparams"].get(constants.ND_SSH_PORT)
+      logging.debug("Ssh port %s (None = default) for node %s",
+                    str(ssh_port), node)
+      success, message = _GetSshRunner(cluster_name). \
+                            VerifyNodeHostname(node, ssh_port)
      if not success:
        val[node] = message
 
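The per-node lookup added to VerifyNode above can be exercised in isolation. A minimal sketch with hypothetical sample data in place of the real cluster configuration (the literal "ssh_port" stands in for constants.ND_SSH_PORT; the node and group values are made up):

    # Hypothetical inputs: node name -> group uuid, group uuid -> group config.
    node_groups = {"node1.example.com": "uuid-a"}
    groups_cfg = {"uuid-a": {"ndparams": {"ssh_port": 2222}}}

    def lookup_ssh_port(node, node_groups, groups_cfg):
        # Mirrors VerifyNode: resolve the node's group config, then read the
        # group-level ssh_port node parameter (None means the default port).
        params = groups_cfg.get(node_groups.get(node))
        return params["ndparams"].get("ssh_port")

    assert lookup_ssh_port("node1.example.com", node_groups, groups_cfg) == 2222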
b/lib/bootstrap.py
@@ -280,7 +280,8 @@
 
 
 def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
-                    use_cluster_key, ask_key, strict_host_check, data):
+                    use_cluster_key, ask_key, strict_host_check,
+                    port, data):
   """Runs a command to configure something on a remote machine.
 
   @type cluster_name: string
@@ -299,6 +300,8 @@
   @param ask_key: See L{ssh.SshRunner.BuildCmd}
   @type strict_host_check: bool
   @param strict_host_check: See L{ssh.SshRunner.BuildCmd}
+  @type port: int
+  @param port: The SSH port of the remote machine or None for the default
   @param data: JSON-serializable input data for script (passed to stdin)
 
   """
@@ -311,6 +314,9 @@
   if verbose:
     cmd.append("--verbose")
 
+  if port is None:
+    port = netutils.GetDaemonPort(constants.SSH)
+
   family = ssconf.SimpleStore().GetPrimaryIPFamily()
   srun = ssh.SshRunner(cluster_name,
                        ipv6=(family == netutils.IP6Address.family))
@@ -318,7 +324,8 @@
                        utils.ShellQuoteArgs(cmd),
                        batch=False, ask_key=ask_key, quiet=False,
                        strict_host_check=strict_host_check,
-                       use_cluster_key=use_cluster_key)
+                       use_cluster_key=use_cluster_key,
+                       port=port)
 
   tempfh = tempfile.TemporaryFile()
   try:
@@ -333,7 +340,7 @@
     raise errors.OpExecError("Command '%s' failed: %s" %
                              (result.cmd, result.fail_reason))
 
-  _WaitForSshDaemon(node, netutils.GetDaemonPort(constants.SSH), family)
+  _WaitForSshDaemon(node, port, family)
 
 
 def _InitFileStorageDir(file_storage_dir):
@@ -874,7 +881,7 @@
                     " the node: %s", msg)
 
 
-def SetupNodeDaemon(opts, cluster_name, node):
+def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
   """Add a node to the cluster.
 
   This function must be called before the actual opcode, and will ssh
@@ -883,6 +890,7 @@
 
   @param cluster_name: the cluster name
   @param node: the name of the new node
+  @param ssh_port: the SSH port of the new node
 
   """
   data = {
@@ -895,7 +903,8 @@
 
   RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                   opts.debug, opts.verbose,
-                  True, opts.ssh_key_check, opts.ssh_key_check, data)
+                  True, opts.ssh_key_check, opts.ssh_key_check,
+                  ssh_port, data)
 
   _WaitForNodeDaemon(node)
 
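RunNodeSetupCmd now resolves the port once, up front, and reuses it for both the ssh invocation and the later _WaitForSshDaemon call, so the two always agree. A hedged sketch of the None-means-default pattern (assumes a machine with ganeti installed; netutils.GetDaemonPort is the same helper the patch itself calls):

    from ganeti import constants, netutils

    def resolve_ssh_port(port=None):
        # None means "cluster default": look up the ssh service port
        # (normally 22).
        if port is None:
            port = netutils.GetDaemonPort(constants.SSH)
        return port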
b/lib/client/gnt_instance.py
@@ -899,6 +899,7 @@
 
     srun = ssh.SshRunner(cluster_name=cluster_name)
     ssh_cmd = srun.BuildCmd(console.host, console.user, cmd,
+                            port=console.port,
                             batch=True, quiet=False, tty=True)
 
     if show_command:
b/lib/client/gnt_node.py
@@ -193,7 +193,7 @@
   return result
 
 
-def _SetupSSH(options, cluster_name, node):
+def _SetupSSH(options, cluster_name, node, ssh_port):
   """Configures a destination node's SSH daemon.
 
   @param options: Command line options
@@ -201,6 +201,8 @@
   @param cluster_name: Cluster name
   @type node: string
   @param node: Destination node name
+  @type ssh_port: int
+  @param ssh_port: Destination node ssh port
 
   """
   if options.force_join:
@@ -226,7 +228,8 @@
 
   bootstrap.RunNodeSetupCmd(cluster_name, node, pathutils.PREPARE_NODE_JOIN,
                             options.debug, options.verbose, False,
-                            options.ssh_key_check, options.ssh_key_check, data)
+                            options.ssh_key_check, options.ssh_key_check,
+                            ssh_port, data)
 
 
 @UsesRPC
@@ -244,10 +247,21 @@
   node = netutils.GetHostname(name=args[0]).name
   readd = opts.readd
 
+  # Retrieve relevant parameters of the node group.
+  ssh_port = None
+  if opts.nodegroup:
+    try:
+      output = cl.QueryGroups(names=[opts.nodegroup], fields=["ndp/ssh_port"],
+                              use_locking=False)
+      (ssh_port, ) = output[0]
+    except (errors.OpPrereqError, errors.OpExecError):
+      pass
+
   try:
-    output = cl.QueryNodes(names=[node], fields=["name", "sip", "master"],
+    output = cl.QueryNodes(names=[node],
+                           fields=["name", "sip", "master", "ndp/ssh_port"],
                            use_locking=False)
-    node_exists, sip, is_master = output[0]
+    node_exists, sip, is_master, ssh_port = output[0]
   except (errors.OpPrereqError, errors.OpExecError):
     node_exists = ""
     sip = None
@@ -279,9 +293,9 @@
              "and grant full intra-cluster ssh root access to/from it\n", node)
 
   if opts.node_setup:
-    _SetupSSH(opts, cluster_name, node)
+    _SetupSSH(opts, cluster_name, node, ssh_port)
 
-  bootstrap.SetupNodeDaemon(opts, cluster_name, node)
+  bootstrap.SetupNodeDaemon(opts, cluster_name, node, ssh_port)
 
   if opts.disk_state:
     disk_state = utils.FlatToDict(opts.disk_state)
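AddNode thus discovers the SSH port in two steps: if a node group was given, the group's effective ndp/ssh_port is fetched first, and the node query (which matters for readds) may overwrite it. A hedged sketch of the group half, assuming a luxi client cl as obtained in gnt_node.py and a hypothetical group name:

    ssh_port = None
    try:
        # "ndp/ssh_port" is the derived query field exposing the group's
        # effective ssh_port node parameter.
        output = cl.QueryGroups(names=["group1"], fields=["ndp/ssh_port"],
                                use_locking=False)
        (ssh_port, ) = output[0]
    except (errors.OpPrereqError, errors.OpExecError):
        pass  # fall back to the ssh_port reported by the node query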
b/lib/cmdlib/cluster.py
@@ -3023,6 +3023,10 @@
     if self._exclusive_storage:
       node_verify_param[constants.NV_EXCLUSIVEPVS] = True
 
+    node_group_uuids = dict(map(lambda n: (n.name, n.group),
+                                self.cfg.GetAllNodesInfo().values()))
+    groups_config = self.cfg.GetAllNodeGroupsInfoDict()
+
     # At this point, we have the in-memory data structures complete,
     # except for the runtime information, which we'll gather next
 
@@ -3034,7 +3038,9 @@
     all_nvinfo = self.rpc.call_node_verify(self.my_node_uuids,
                                            node_verify_param,
                                            self.cfg.GetClusterName(),
-                                           self.cfg.GetClusterInfo().hvparams)
+                                           self.cfg.GetClusterInfo().hvparams,
+                                           node_group_uuids,
+                                           groups_config)
     nvinfo_endtime = time.time()
 
     if self.extra_lv_nodes and vg_name is not None:
@@ -3042,7 +3048,9 @@
           self.rpc.call_node_verify(self.extra_lv_nodes,
                                     {constants.NV_LVLIST: vg_name},
                                     self.cfg.GetClusterName(),
-                                    self.cfg.GetClusterInfo().hvparams)
+                                    self.cfg.GetClusterInfo().hvparams,
+                                    node_group_uuids,
+                                    groups_config)
     else:
       extra_lv_nvinfo = {}
 
@@ -3077,7 +3085,9 @@
       key = constants.NV_FILELIST
       vf_nvinfo.update(self.rpc.call_node_verify(
          additional_node_uuids, {key: node_verify_param[key]},
-         self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams))
+         self.cfg.GetClusterName(), self.cfg.GetClusterInfo().hvparams,
+         node_group_uuids,
+         groups_config))
     else:
       vf_nvinfo = all_nvinfo
       vf_node_info = self.my_node_info.values()
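The node-name-to-group-uuid map above is built with map/lambda; a behavior-equivalent generator form may read more clearly (all_nodes stands in for self.cfg.GetAllNodesInfo().values(); each Node object carries its group's uuid in its .group field):

    node_group_uuids = dict((n.name, n.group) for n in all_nodes)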
b/lib/cmdlib/node.py
@@ -270,10 +270,11 @@
     else:
       self.master_candidate = False
 
+    node_group = self.cfg.LookupNodeGroup(self.op.group)
+
     if self.op.readd:
       self.new_node = existing_node_info
     else:
-      node_group = self.cfg.LookupNodeGroup(self.op.group)
       self.new_node = objects.Node(name=node_name,
                                    primary_ip=self.op.primary_ip,
                                    secondary_ip=secondary_ip,
@@ -313,7 +314,10 @@
       cname = self.cfg.GetClusterName()
       result = rpcrunner.call_node_verify_light(
           [node_name], vparams, cname,
-          self.cfg.GetClusterInfo().hvparams)[node_name]
+          self.cfg.GetClusterInfo().hvparams,
+          {node_name: node_group},
+          self.cfg.GetAllNodeGroupsInfoDict()
+        )[node_name]
       (errmsgs, _) = CheckNodePVs(result.payload, excl_stor)
       if errmsgs:
         raise errors.OpPrereqError("Checks on node PVs failed: %s" %
@@ -381,7 +385,10 @@
     result = self.rpc.call_node_verify(
                node_verifier_uuids, node_verify_param,
                self.cfg.GetClusterName(),
-               self.cfg.GetClusterInfo().hvparams)
+               self.cfg.GetClusterInfo().hvparams,
+               {self.new_node.name: self.cfg.LookupNodeGroup(self.op.group)},
+               self.cfg.GetAllNodeGroupsInfoDict()
+               )
     for verifier in node_verifier_uuids:
       result[verifier].Raise("Cannot communicate with node %s" % verifier)
       nl_payload = result[verifier].payload[constants.NV_NODELIST]
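For these single-node verification calls the two new mappings collapse to one relevant entry. With hypothetical values, the RPC receives something like:

    # node_groups: just the node being added, mapped to its group's uuid.
    node_groups = {"newnode.example.com": "uuid-default"}
    # groups_cfg: every group's serialized config (ToDict() output); only
    # the entry for the new node's group is actually consulted.
    groups_cfg = {"uuid-default": {"name": "default",
                                   "ndparams": {"ssh_port": 22}}}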
b/lib/config.py
@@ -1374,6 +1374,15 @@
     return self._UnlockedGetAllNodeGroupsInfo()
 
   @locking.ssynchronized(_config_lock, shared=1)
+  def GetAllNodeGroupsInfoDict(self):
+    """Get the configuration of all node groups expressed as a dictionary of
+    dictionaries.
+
+    """
+    return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()),
+                    self._UnlockedGetAllNodeGroupsInfo().items()))
+
+  @locking.ssynchronized(_config_lock, shared=1)
   def GetNodeGroupList(self):
     """Get a list of node groups.
 
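GetAllNodeGroupsInfoDict relies on a Python 2 tuple-unpacking lambda ("lambda (uuid, ng): ..."), a construct Python 3 later removed. A behavior-equivalent sketch without it, where groups stands in for self._UnlockedGetAllNodeGroupsInfo():

    def groups_to_dicts(groups):
        # uuid -> serialized group config; same result as the map/lambda form.
        return dict((uuid, ng.ToDict()) for uuid, ng in groups.items())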
b/lib/rpc_defs.py
@@ -489,6 +489,9 @@
     ("checkdict", None, "What to verify"),
     ("cluster_name", None, "Cluster name"),
     ("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
+    ("node_groups", None, "node names mapped to their group uuids"),
+    ("groups_cfg", None,
+      "a dictionary mapping group uuids to their configuration"),
     ], None, None, "Request verification of given parameters"),
   ("node_volumes", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
    "Gets all volumes on node(s)"),
@@ -611,6 +614,9 @@
       ("checkdict", None, "What to verify"),
       ("cluster_name", None, "Cluster name"),
       ("hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
+      ("node_groups", None, "node names mapped to their group uuids"),
+      ("groups_cfg", None,
+       "a dictionary mapping group uuids to their configuration"),
       ], None, None, "Request verification of given parameters"),
     ]),
   "RpcClientConfig": _Prepare([
b/lib/server/noded.py
@@ -774,15 +774,19 @@
     """Run a verify sequence on this node.
 
     """
-    (what, cluster_name, hvparams) = params
-    return backend.VerifyNode(what, cluster_name, hvparams)
+    (what, cluster_name, hvparams, node_groups, groups_cfg) = params
+    return backend.VerifyNode(what, cluster_name, hvparams,
+                              node_groups, groups_cfg)
 
   @classmethod
   def perspective_node_verify_light(cls, params):
     """Run a light verify sequence on this node.
 
+    This call is meant to perform a less strict verification of the node in
+    certain situations. Right now, it is invoked only when a node is just about
+    to be added to a cluster, and even then, it performs the same checks as
+    L{perspective_node_verify}.
     """
-    # So far it's the same as the normal node_verify
     return cls.perspective_node_verify(params)
 
   @staticmethod
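On the wire the node_verify parameters travel as a positional tuple, so the two new members simply ride along after hvparams; an older daemon unpacking a three-tuple would fail on the new payload. A sketch of the expected shape, with hypothetical values:

    params = ({"hostname": "node1.example.com"},  # what: checks to run
              "cluster.example.com",              # cluster_name
              {"xen-pvm": {}},                    # hvparams per hypervisor
              {"node1.example.com": "uuid-a"},    # node_groups (new)
              {"uuid-a": {"ndparams": {}}})       # groups_cfg (new)
    (what, cluster_name, hvparams, node_groups, groups_cfg) = params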
b/lib/ssh.py
@@ -123,7 +123,8 @@
     self.ipv6 = ipv6
 
   def _BuildSshOptions(self, batch, ask_key, use_cluster_key,
-                       strict_host_check, private_key=None, quiet=True):
+                       strict_host_check, private_key=None, quiet=True,
+                       port=None):
     """Builds a list with needed SSH options.
 
     @param batch: same as ssh's batch option
@@ -134,6 +135,7 @@
     @param strict_host_check: this makes the host key checking strict
     @param private_key: use this private key instead of the default
     @param quiet: whether to enable -q to ssh
+    @param port: the SSH port to use, or None to use the default
 
     @rtype: list
     @return: the list of options ready to use in L{utils.process.RunCmd}
@@ -156,6 +158,9 @@
     if private_key:
       options.append("-i%s" % private_key)
 
+    if port:
+      options.append("-oPort=%d" % port)
+
     # TODO: Too many boolean options, maybe convert them to more descriptive
     # constants.
 
@@ -190,7 +195,7 @@
 
   def BuildCmd(self, hostname, user, command, batch=True, ask_key=False,
                tty=False, use_cluster_key=True, strict_host_check=True,
-               private_key=None, quiet=True):
+               private_key=None, quiet=True, port=None):
     """Build an ssh command to execute a command on a remote node.
 
     @param hostname: the target host, string
@@ -205,6 +210,7 @@
     @param strict_host_check: whether to check the host's SSH key at all
     @param private_key: use this private key instead of the default
     @param quiet: whether to enable -q to ssh
+    @param port: the SSH port on which the node's daemon is running
 
     @return: the ssh call to run 'command' on the remote host.
 
@@ -212,7 +218,7 @@
     argv = [constants.SSH]
     argv.extend(self._BuildSshOptions(batch, ask_key, use_cluster_key,
                                       strict_host_check, private_key,
-                                      quiet=quiet))
+                                      quiet=quiet, port=port))
     if tty:
       argv.extend(["-t", "-t"])
 
@@ -277,7 +283,7 @@
 
     return not result.failed
 
-  def VerifyNodeHostname(self, node):
+  def VerifyNodeHostname(self, node, ssh_port):
     """Verify hostname consistency via SSH.
 
     This functions connects via ssh to a node and compares the hostname
@@ -290,6 +296,7 @@
 
     @param node: nodename of a host to check; can be short or
         full qualified hostname
+    @param ssh_port: the port of a SSH daemon running on the node
 
     @return: (success, detail), where:
        - success: True/False
@@ -301,7 +308,8 @@
           "else"
           "  echo \"$GANETI_HOSTNAME\";"
           "fi")
-    retval = self.Run(node, constants.SSH_LOGIN_USER, cmd, quiet=False)
+    retval = self.Run(node, constants.SSH_LOGIN_USER, cmd,
+                      quiet=False, port=ssh_port)
 
     if retval.failed:
       msg = "ssh problem"
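The port plumbing bottoms out in _BuildSshOptions, where a non-None port becomes an "-oPort=<n>" option. A hedged usage sketch, assuming a configured cluster (host name and port are made up):

    from ganeti import constants, ssh

    srun = ssh.SshRunner("cluster.example.com")
    argv = srun.BuildCmd("node1.example.com", constants.SSH_LOGIN_USER,
                         "hostname", port=2222)
    assert "-oPort=2222" in argv

Note the guard is "if port:" rather than "if port is not None:", so a port of 0 would silently fall back to the default.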
b/test/py/ganeti.backend_unittest.py
@@ -89,7 +89,7 @@
     # this a real functional test, but requires localhost to be reachable
     local_data = (netutils.Hostname.GetSysName(),
                   constants.IP4_ADDRESS_LOCALHOST)
-    result = backend.VerifyNode({constants.NV_MASTERIP: local_data}, None, {})
+    result = backend.VerifyNode({constants.NV_MASTERIP: local_data}, None, {}, {}, {})
     self.failUnless(constants.NV_MASTERIP in result,
                     "Master IP data not returned")
     self.failUnless(result[constants.NV_MASTERIP], "Cannot reach localhost")
@@ -100,7 +100,7 @@
     bad_data =  ("master.example.com", "192.0.2.1")
     # we just test that whatever TcpPing returns, VerifyNode returns too
     netutils.TcpPing = lambda a, b, source=None: False
-    result = backend.VerifyNode({constants.NV_MASTERIP: bad_data}, None, {})
+    result = backend.VerifyNode({constants.NV_MASTERIP: bad_data}, None, {}, {}, {})
     self.failUnless(constants.NV_MASTERIP in result,
                     "Master IP data not returned")
     self.failIf(result[constants.NV_MASTERIP],
