root / lib / bootstrap.py @ ec0652ad


#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import tempfile

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import hypervisor

    
def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root and adds the public key to
  root's authorized keys file.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()

    
def _GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
  """Generates a self-signed SSL certificate.

  @type file_name: str
  @param file_name: Path to output file
  @type validity: int
  @param validity: Validity for certificate in days

  """
  (fd, tmp_file_name) = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    # Set permissions before writing key
    os.chmod(tmp_file_name, 0600)

    result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                           "-days", str(validity), "-nodes", "-x509",
                           "-keyout", tmp_file_name, "-out", tmp_file_name,
                           "-batch"])
    if result.failed:
      raise errors.OpExecError("Could not generate SSL certificate, command"
                               " %s had exitcode %s and error message %s" %
                               (result.cmd, result.exit_code, result.output))

    # Make read-only
    os.chmod(tmp_file_name, 0400)

    os.rename(tmp_file_name, file_name)
  finally:
    utils.RemoveFile(tmp_file_name)
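# Editorial usage sketch (not part of the original flow; the path below is
# hypothetical):
#
#   _GenerateSelfSignedSslCert("/tmp/test-cert.pem", validity=30)
#
# would create a file holding both the RSA private key and the certificate,
# written atomically via a temporary file in the same directory and left
# read-only (mode 0400).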

    
def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This generates the cluster SSL certificate (and, if needed, the RAPI
  certificate) and restarts the node daemon so that it picks them up.

  """
  _GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    _GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
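# Editorial call-order sketch (illustrative only):
#
#   _InitGanetiServerSetup()
#   # - regenerates constants.SSL_CERT_FILE unconditionally
#   # - creates constants.RAPI_CERT_FILE only if it does not exist yet
#   # - restarts the node daemon via "constants.NODE_INITD_SCRIPT restart"
#
# so ganeti-noded always comes back up serving the fresh certificates.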

    
def InitCluster(cluster_name, mac_prefix,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, hvparams=None, enabled_hypervisors=None,
                default_hypervisor=None, modify_etc_hosts=True):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up the node daemon's SSL certificates and restart it
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name)

  _InitSSHSetup()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )

  sscfg = InitConfig(constants.CONFIG_VERSION,
                     cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg = config.ConfigWriter()
  cfg.Update(cfg.GetClusterInfo())

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)
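# Editorial invocation sketch (all values below are placeholders, not values
# taken from this module; in practice "gnt-cluster init" collects the real
# parameters and calls this function):
#
#   InitCluster(cluster_name="cluster.example.com",
#               mac_prefix="aa:00:00",
#               master_netdev="eth0",
#               file_storage_dir="/srv/ganeti/file-storage",
#               candidate_pool_size=10,
#               vg_name="xenvg",
#               beparams=default_beparams,      # hypothetical dicts holding
#               nicparams=default_nicparams,    # the cluster-wide parameter
#               hvparams=default_hvparams,      # defaults
#               enabled_hypervisors=["xen-pvm"],
#               default_hypervisor="xen-pvm")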

    
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  @rtype: L{ssconf.SimpleConfigWriter}
  @return: initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg
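# Editorial example: InitConfig is driven by InitCluster() above, but it can
# also be exercised on its own against a scratch path (the file name below is
# hypothetical):
#
#   cfg = InitConfig(constants.CONFIG_VERSION, cluster_config,
#                    master_node_config, cfg_file="/tmp/scratch-config.data")
#
# which writes a configuration containing exactly one node (the future
# master) and no instances, and returns the SimpleConfigWriter wrapping it.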

    
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  if result.failed or not result.data:
    logging.warning("Could not disable the master role")
  result = rpc.RpcRunner.call_node_leave_cluster(master)
  if result.failed or not result.data:
    logging.warning("Could not shutdown the node daemon and cleanup the node")

    
def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)

  noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)

  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if (re.search('^!EOF\.', noded_cert, re.MULTILINE) or
      re.search('^!EOF\.', rapi_cert, re.MULTILINE)):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"

  # copy the node and RAPI certificates over ssh and restart the node
  # daemon, which will start serving them; note that all the variables
  # below are sanitized at this point, either by being constants or by
  # the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "chmod 0400 %s %s && "
               "%s restart" %
               (constants.SSL_CERT_FILE, noded_cert,
                constants.RAPI_CERT_FILE, rapi_cert,
                constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
                constants.NODE_INITD_SCRIPT))
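  # Editorial illustration (not part of the original code): the command built
  # above roughly expands to the following shell sequence; the paths are only
  # examples standing in for constants.SSL_CERT_FILE,
  # constants.RAPI_CERT_FILE and constants.NODE_INITD_SCRIPT:
  #
  #   umask 077 && cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
  #   <node certificate PEM>!EOF.
  #   cat > '/var/lib/ganeti/rapi.pem' << '!EOF.' &&
  #   <RAPI certificate PEM>!EOF.
  #   chmod 0400 /var/lib/ganeti/server.pem /var/lib/ganeti/rapi.pem &&
  #   /etc/init.d/ganeti restart
  #
  # The '!EOF.' delimiter is safe because the checks above reject any
  # certificate in which it would appear at the start of a line.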

    
  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

    
def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)))

  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("I have a wrong configuration, I believe the"
                                 " master is %s but the other nodes voted for"
                                 " %s. Please resync the configuration of"
                                 " this node." % (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  if result.failed or not result.data:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info)

  result = rpc.RpcRunner.call_node_start_master(new_master, True)
  if result.failed or not result.data:
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode
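# Editorial walkthrough with hypothetical node names (not taken from the code
# above): on a cluster of node1..node4 where node1 is the current master,
# running MasterFailover() on node2 (which must be a master candidate)
# proceeds roughly as follows:
#
#   - GatherMasterVotes() asks node1, node3 and node4 (node2 drops itself)
#     and would typically return [("node1", 3)], so the sanity checks pass;
#   - the master role is stopped on node1;
#   - the local configuration is rewritten with master_node = "node2",
#     which also regenerates the ssconf files;
#   - the master role is started on node2, and 0 is returned on success.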

    
def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  as we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.data
    if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes, then on name; this way None sorts last
  # when half of the nodes fail to respond and the other half all vote for
  # the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
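# Editorial example of the tallying above (hypothetical node names): if five
# remote nodes are queried, three report "node1" as their master and two fail
# to answer, votes ends up as {"node1": 3, None: 2} and the function returns
# [("node1", 3), (None, 2)]. In an even 2-2 split, ("node1", 2) still sorts
# before (None, 2) because None compares lower than any string, so an actual
# consensus wins over an equal number of failed calls.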