#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor

def _InitSSHSetup():
  """Set up the SSH configuration for the cluster.

  This generates a DSA keypair for root, adds the public key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
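
# A minimal sketch of what the setup above amounts to at the shell level,
# assuming the default root key paths returned by ssh.GetUserFiles:
#   ssh-keygen -t dsa -f /root/.ssh/id_dsa -q -N ""
#   cat /root/.ssh/id_dsa.pub >> /root/.ssh/authorized_keys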


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400)
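
# Illustrative call (the path is a hypothetical example; the code below
# always passes constants.HMAC_CLUSTER_KEY):
#   GenerateHmacKey("/var/lib/ganeti/hmac.key")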


def _InitGanetiServerSetup(master_name):
  """Set up the necessary configuration for the initial node daemon.

  This generates the node and RAPI SSL certificates and the cluster
  HMAC key (keeping any existing RAPI certificate and HMAC key), and
  then starts the node daemon.

  """
  utils.GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    utils.GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

  if not os.path.exists(constants.HMAC_CLUSTER_KEY):
    GenerateHmacKey(constants.HMAC_CLUSTER_KEY)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  # Wait for node daemon to become responsive
  def _CheckNodeDaemon():
    result = rpc.RpcRunner.call_version([master_name])[master_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, 10.0)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon didn't answer queries within"
                             " 10 seconds")


def InitCluster(cluster_name, mac_prefix,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None,
                nicparams=None, hvparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  hostname = utils.GetHostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the loopback"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host. Aborting." %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = utils.GetHostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.",
                               errors.ECODE_NOTUNIQUE)

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given",
                                 errors.ECODE_INVAL)
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip, errors.ECODE_ENVIRON)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.", errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up the inter-node certificates and keys, and start the node daemon
  _InitGanetiServerSetup(hostname.name)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name)

  if modify_ssh_setup:
    _InitSSHSetup()

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    ctime=now,
    mtime=now,
    uuid=utils.NewUUID(),
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter()
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  # TODO: Warn on failed start master
  rpc.RpcRunner.call_node_start_master(hostname.name, True, False)
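
# A rough sketch of a direct call, for orientation only (in practice this
# is reached via "gnt-cluster init", which fills in complete parameter
# defaults first; the values below are illustrative placeholders):
#   InitCluster("cluster.example.com", "aa:00:00",
#               master_netdev="eth0",
#               file_storage_dir="/srv/ganeti/file-storage",
#               candidate_pool_size=10, vg_name="xenvg",
#               beparams=constants.BEC_DEFAULTS,
#               nicparams=constants.NICC_DEFAULTS,
#               hvparams={}, enabled_hypervisors=[constants.HT_XEN_PVM])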


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)
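
# For orientation, the serialized configuration is a dict with the field
# names used above; schematically (the actual encoding is whatever
# serializer.Dump emits):
#   {"version": ..., "cluster": {...}, "nodes": {"<master name>": {...}},
#    "instances": {}, "serial_no": 1, "ctime": ..., "mtime": ...}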


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)
  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)

  noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)
  hmac_key = utils.ReadFile(constants.HMAC_CLUSTER_KEY)

  # In the base64 PEM encoding, neither '!' nor '.' are valid characters,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below. HMAC keys are hexadecimal strings,
  # so the same restrictions apply.
  for content in (noded_cert, rapi_cert, hmac_key):
    if re.search(r'^!EOF\.', content, re.MULTILINE):
      raise errors.OpExecError("invalid SSL certificate or HMAC key")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"
  if not hmac_key.endswith("\n"):
    hmac_key += "\n"

  # Connect with ssh to copy the certificates and the HMAC key to the new
  # node and then start ganeti-noded there. Note that all the variables
  # below are sanitized at this point, either by being constants or by the
  # checks above.
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "chmod 0400 %s %s %s && "
               "%s start %s" %
               (constants.SSL_CERT_FILE, noded_cert,
                constants.RAPI_CERT_FILE, rapi_cert,
                constants.HMAC_CLUSTER_KEY, hmac_key,
                constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
                constants.HMAC_CLUSTER_KEY,
                constants.DAEMON_UTIL, constants.NODED))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))
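
# For reference, the remote command above expands to a shell sequence of
# roughly this shape (actual paths and names come from the constants):
#   umask 077 &&
#   cat > '/path/to/server.pem' << '!EOF.' &&
#   <noded certificate>!EOF.
#   ... (repeated for the RAPI certificate and the HMAC key) ...
#   chmod 0400 <the three files> &&
#   daemon-util start ganeti-noded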


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and this node to become the
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
                      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info, logging.error)

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  return rcode
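
# MasterFailover is normally reached via "gnt-cluster master-failover",
# run on the node that should take over the master role; it returns 0 on
# success and 1 if the master role could not be started on the new master.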


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it in bootstrap, where we already use ssconf in other
  functions.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since, bugs aside, we use the same
  source of configuration information for both backend and bootstrap,
  so we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # Sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes are not responding and the other
  # half all vote for the same master.
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
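
# Example of interpreting the result (node names are illustrative):
# querying ["node2", "node3", "node4"] might return
#   [("node1", 2), (None, 1)]
# meaning two nodes agree that node1 is the master, while one node could
# not be contacted (errors are tallied under the None key).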