#
#

# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time
import tempfile

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.block import drbd
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: After how many seconds a daemon must be responsive
_DAEMON_READY_TIMEOUT = 10.0


def _InitSSHSetup():
  """Set up the SSH configuration for the cluster.

  This generates a DSA keypair for root, adds the public key to the
  list of authorized keys and adds the host key to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate SSH keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
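
# A minimal usage sketch (the path below is hypothetical):
#   GenerateHmacKey("/var/lib/ganeti/hmac.key")
# writes a single random secret plus a trailing newline, readable only by
# its owner (mode 0400), backing up any file already present at that path.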


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
                           certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)


def _InitGanetiServerSetup(master_name):
  """Set up the necessary configuration for the initial node daemon.

  This generates the cluster's initial certificates and secrets and
  starts the node daemon on the master node.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


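# The _WaitFor*Daemon helpers below share one retry pattern: the probe
# raises utils.RetryAgain until the daemon answers, and utils.Retry re-runs
# it (roughly once per second) until _DAEMON_READY_TIMEOUT expires, at which
# point the RetryTimeout is converted into an OpExecError for the caller.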
def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # Pylint bug <http://www.logilab.org/ticket/35642>
    # pylint: disable=E1101
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port, family):
  """Wait for SSH daemon to become responsive.

  """
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))


def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
                    use_cluster_key, ask_key, strict_host_check, data):
  """Runs a command to configure something on a remote machine.

  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Node name
  @type basecmd: string
  @param basecmd: Base command (path on the remote machine)
  @type debug: bool
  @param debug: Enable debug output
  @type verbose: bool
  @param verbose: Enable verbose output
  @type use_cluster_key: bool
  @param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
  @type ask_key: bool
  @param ask_key: See L{ssh.SshRunner.BuildCmd}
  @type strict_host_check: bool
  @param strict_host_check: See L{ssh.SshRunner.BuildCmd}
  @param data: JSON-serializable input data for the script (passed to stdin)

  """
  cmd = [basecmd]

  # Pass --debug/--verbose to the external script if set on our invocation
  if debug:
    cmd.append("--debug")

  if verbose:
    cmd.append("--verbose")

  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  srun = ssh.SshRunner(cluster_name,
                       ipv6=(family == netutils.IP6Address.family))
  scmd = srun.BuildCmd(node, constants.SSH_LOGIN_USER,
                       utils.ShellQuoteArgs(cmd),
                       batch=False, ask_key=ask_key, quiet=False,
                       strict_host_check=strict_host_check,
                       use_cluster_key=use_cluster_key)

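  # The JSON payload reaches the remote script's stdin via a temporary file
  # rather than the command line, so the data is not exposed in the process
  # list; seek(0) rewinds the file so RunCmd can read it from the start.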
  tempfh = tempfile.TemporaryFile()
  try:
    tempfh.write(serializer.DumpJson(data))
    tempfh.seek(0)

    result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)
  finally:
    tempfh.close()

  if result.failed:
    raise errors.OpExecError("Command '%s' failed: %s" %
                             (result.cmd, result.fail_reason))

  _WaitForSshDaemon(node, netutils.GetDaemonPort(constants.SSH), family)


def _InitFileStorage(file_storage_dir):
  """Initialize the file storage, if needed.

  @param file_storage_dir: the user-supplied value
  @return: either an empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir


def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk templates to be used in this
    cluster

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_disk_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if invalid_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % invalid_disk_templates,
                               errors.ECODE_INVAL)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary IP version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this IP address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  if constants.ENABLE_FILE_STORAGE:
    file_storage_dir = _InitFileStorage(file_storage_dir)
  else:
    file_storage_dir = ""

  if constants.ENABLE_SHARED_FILE_STORAGE:
    shared_file_storage_dir = _InitFileStorage(shared_file_storage_dir)
  else:
    shared_file_storage_dir = ""

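  # A valid prefix looks like "aa:00:00": three lowercase two-character
  # groups separated by colons, used as the fixed first half of the MAC
  # addresses generated for instance NICs.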
  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid MAC prefix given: '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                               errors.ECODE_INVAL)

  # set up SSH config and /etc/hosts
  sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  elif constants.HTOOLS:
    # htools was enabled at build-time, we default to it
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

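  # The configuration now exists on disk, but every daemon was stopped
  # earlier: noded is started first (below), and only then are the
  # remaining daemons launched via "start-all".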
  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  nodes = {
    master_node_config.name: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.name],
    diskparams={},
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = master
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node: %s", msg)


def SetupNodeDaemon(opts, cluster_name, node):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will SSH
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal RPC calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    }

  RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                  opts.debug, opts.verbose,
                  True, opts.ssh_key_check, opts.ssh_key_check, data)

  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the current node to become
  the new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes' agreement
                      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = old_master
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking that the master IP is no longer reachable...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

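  # The (1, 1.5, 5) tuple selects utils.Retry's growing-delay mode, here
  # assumed to mean: start with one second between pings, back off by a
  # factor of 1.5, and never wait more than five seconds between attempts.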
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside)
  we'll always vote for ourselves: both backend and bootstrap use the
  same source of configuration information.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing ourselves)
    return []
  results = rpc.BootstrapRunner().call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    # for now we accept lengths of 3, 4 and 5 (data[3] is the primary ip
    # version and data[4] is the master netmask)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = list(votes.items())
  # sort first on number of votes then on name, since we want None sorted
  # last if half of the nodes do not respond and the other half all vote
  # for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list