lib/bootstrap.py @ 8e2524c3
#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import tempfile

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import hypervisor


def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()
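
# Illustrative sketch (an assumption, not part of the original module): for
# the usual case of constants.GANETI_RUNAS being root, the steps above
# amount to roughly this shell sequence, with the exact paths coming from
# ssh.GetUserFiles rather than being hardcoded:
#
#   ssh-keygen -t dsa -f /root/.ssh/id_dsa -q -N ""
#   cat /root/.ssh/id_dsa.pub >> /root/.ssh/authorized_keys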


def _GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
  """Generates a self-signed SSL certificate.

  @type file_name: str
  @param file_name: Path to output file
  @type validity: int
  @param validity: Validity for certificate in days

  """
  (fd, tmp_file_name) = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    # Set permissions before writing key
    os.chmod(tmp_file_name, 0600)

    result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                           "-days", str(validity), "-nodes", "-x509",
                           "-keyout", tmp_file_name, "-out", tmp_file_name,
                           "-batch"])
    if result.failed:
      raise errors.OpExecError("Could not generate SSL certificate, command"
                               " %s had exitcode %s and error message %s" %
                               (result.cmd, result.exit_code, result.output))

    # Make read-only
    os.chmod(tmp_file_name, 0400)

    os.rename(tmp_file_name, file_name)
  finally:
    utils.RemoveFile(tmp_file_name)
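
# Illustrative check (an assumption, not part of the original module):
# because -keyout and -out point at the same file above, the result holds
# both the private key and the certificate in one PEM file, so it can be
# inspected with stock openssl tooling; the path below is hypothetical,
# the real one is whatever file_name was passed in:
#
#   openssl x509 -in /var/lib/ganeti/server.pem -noout -subject -dates
#   openssl rsa -in /var/lib/ganeti/server.pem -noout -check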


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This generates the cluster SSL certificates (the main one
  unconditionally, the RAPI one only if not yet present) and restarts
  the node daemon so it picks them up.

  """
  _GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    _GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up the certificates and restart the node daemon
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )

  sscfg = InitConfig(constants.CONFIG_VERSION,
                     cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg = config.ConfigWriter()
  cfg.Update(cfg.GetClusterInfo())

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)
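
# Illustrative usage sketch (hypothetical values, not part of the original
# module): roughly what a command-line frontend would pass for a small
# Xen-based cluster; every literal below is an assumption:
#
#   InitCluster("cluster.example.com", "aa:00:00", "xen-br0", "eth0",
#               "/srv/ganeti/file-storage", candidate_pool_size=10,
#               vg_name="xenvg", beparams={}, hvparams={"xen-pvm": {}},
#               enabled_hypervisors=["xen-pvm"],
#               default_hypervisor="xen-pvm")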


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  @rtype: L{ssconf.SimpleConfigWriter}
  @return: initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg
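
# Illustrative sketch (an assumption, not part of the original module): the
# dict handed to SimpleConfigWriter.FromDict above mirrors the ConfigData
# constructor call, so it has roughly this shape:
#
#   {
#     "version": version,
#     "cluster": <cluster_config serialized by ToDict()>,
#     "nodes": {"node1.example.com": <master_node_config serialized>},
#     "instances": {},
#     "serial_no": 1,
#   }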


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  if result.failed or not result.data:
    logging.warning("Could not disable the master role")
  result = rpc.RpcRunner.call_node_leave_cluster(master)
  if result.failed or not result.data:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node")


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)

  noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)

  # in the base64 PEM encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if (re.search('^!EOF\.', noded_cert, re.MULTILINE) or
      re.search('^!EOF\.', rapi_cert, re.MULTILINE)):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"

  # copy the SSL certificates to the new node and restart its node
  # daemon over ssh; note that all the below variables are sanitized at
  # this point, either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "chmod 0400 %s %s && "
               "%s restart" %
               (constants.SSL_CERT_FILE, noded_cert,
                constants.RAPI_CERT_FILE, rapi_cert,
                constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
                constants.NODE_INITD_SCRIPT))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))
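
# Illustrative expansion (an assumption, not part of the original module):
# with hypothetical paths substituted for the constants, mycommand above
# renders to a shell sequence of roughly this shape, the '!EOF.' marker
# delimiting each here-document:
#
#   umask 077 &&
#   cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
#   -----BEGIN CERTIFICATE-----
#   ...
#   -----END CERTIFICATE-----
#   !EOF.
#   cat > '/var/lib/ganeti/rapi.pem' << '!EOF.' &&
#   ...
#   !EOF.
#   chmod 0400 /var/lib/ganeti/server.pem /var/lib/ganeti/rapi.pem &&
#   /etc/init.d/ganeti restart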


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes' agreement
                      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)))

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.")
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  if result.failed or not result.data:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info)

  # 2.0.X: Don't start the master if no_voting is true
  result = rpc.RpcRunner.call_node_start_master(new_master, not no_voting)
  if result.failed or not result.data:
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since, bugs aside, we use the same
  source of configuration information for both backend and bootstrap,
  so we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.data
    if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes are not responding and the other
  # half all vote for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
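

def _ExampleVoteTally():
  """Illustrative sketch only, not part of the original module.

  Demonstrates, on hand-built data, the tally and sort behaviour of
  GatherMasterVotes: three nodes reporting node1 as master and two
  failed queries (counted under the None key).

  """
  votes = {"node1": 3, None: 2}
  vote_list = votes.items()
  # same sort as in GatherMasterVotes: most votes first; on a tie, a
  # real node name sorts before None under the reverse ordering
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)
  return vote_list  # [("node1", 3), (None, 2)]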