lib/bootstrap.py @ fb62843c

#
#

# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time
import tempfile

from ganeti.cmdlib import cluster
from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: Time (in seconds) within which a daemon must become responsive
_DAEMON_READY_TIMEOUT = 10.0


def _InitSSHSetup():
  """Set up the SSH configuration for the cluster.

  This generates a DSA keypair for root and adds the public key to the
  authorized keys file.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)


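# A note on the precedence in GenerateClusterCrypto below: for the RAPI and
# SPICE certificates and the cluster domain secret, an explicitly supplied
# value (rapi_cert_pem, spice_cert_pem/spice_cacert_pem, cds) always wins
# over the corresponding new_* flag, and a missing file is (re)created even
# if neither is given.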
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
                           certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)


def _InitGanetiServerSetup(master_name):
  """Set up the necessary configuration for the initial node daemon.

  This generates the cluster SSL certificate and related secrets, and
  starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


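# The three _WaitFor*Daemon helpers below share one pattern: an inner check
# function signals "not ready yet" by raising utils.RetryAgain, and
# utils.Retry re-runs it (here, at one-second intervals) until it either
# succeeds or _DAEMON_READY_TIMEOUT expires, at which point the resulting
# utils.RetryTimeout is turned into an OpExecError.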
def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # Pylint bug <http://www.logilab.org/ticket/35642>
    # pylint: disable=E1101
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port, family):
  """Wait for SSH daemon to become responsive.

  """
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))


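# RunNodeSetupCmd stages its JSON payload in a temporary file so that the
# remote command, run through SSH in interactive mode, can read the data
# from a real stdin file descriptor.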
def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
                    use_cluster_key, ask_key, strict_host_check, data):
  """Runs a command to configure something on a remote machine.

  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Node name
  @type basecmd: string
  @param basecmd: Base command (path on the remote machine)
  @type debug: bool
  @param debug: Enable debug output
  @type verbose: bool
  @param verbose: Enable verbose output
  @type use_cluster_key: bool
  @param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
  @type ask_key: bool
  @param ask_key: See L{ssh.SshRunner.BuildCmd}
  @type strict_host_check: bool
  @param strict_host_check: See L{ssh.SshRunner.BuildCmd}
  @param data: JSON-serializable input data for script (passed to stdin)

  """
  cmd = [basecmd]

  # Pass --debug/--verbose to the external script if set on our invocation
  if debug:
    cmd.append("--debug")

  if verbose:
    cmd.append("--verbose")

  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  srun = ssh.SshRunner(cluster_name,
                       ipv6=(family == netutils.IP6Address.family))
  scmd = srun.BuildCmd(node, constants.SSH_LOGIN_USER,
                       utils.ShellQuoteArgs(cmd),
                       batch=False, ask_key=ask_key, quiet=False,
                       strict_host_check=strict_host_check,
                       use_cluster_key=use_cluster_key)

  tempfh = tempfile.TemporaryFile()
  try:
    tempfh.write(serializer.DumpJson(data))
    tempfh.seek(0)

    result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)
  finally:
    tempfh.close()

  if result.failed:
    raise errors.OpExecError("Command '%s' failed: %s" %
                             (result.cmd, result.fail_reason))

  _WaitForSshDaemon(node, netutils.GetDaemonPort(constants.SSH), family)


def _InitFileStorageDir(file_storage_dir):
  """Initialize the file storage directory, if needed.

  @param file_storage_dir: the user-supplied value
  @return: the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir


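# _PrepareFileBasedStorage carries the shared logic for the two thin
# wrappers further below (_PrepareFileStorage and _PrepareSharedFileStorage),
# which differ only in their default directory and disk template constant.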
def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is 'None'
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE'
  @rtype: string
  @return: the name of the actual file storage directory

  """
  assert (file_disk_template in
          utils.storage.GetDiskTemplatesOfStorageType(constants.ST_FILE))
  if file_storage_dir is None:
    file_storage_dir = default_dir
  if not acceptance_fn:
    acceptance_fn = \
        lambda path: filestorage.CheckFileStoragePathAcceptance(
            path, exact_match_ok=True)

  cluster.CheckFileStoragePathVsEnabledDiskTemplates(
      logging.warning, file_storage_dir, enabled_disk_templates)

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
  """Checks the sanity of the enabled disk templates.

  """
  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_disk_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if invalid_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % invalid_disk_templates,
                               errors.ECODE_INVAL)


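# For example, with ipolicy[constants.IPOLICY_DTS] == ["drbd", "file",
# "plain"] and enabled_disk_templates == ["drbd", "plain"], the function
# below narrows the policy to ["drbd", "plain"] (in arbitrary order, since
# the intersection is computed via sets).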
def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function removes from the ipolicy's list of allowed disk
  templates those that are not enabled by the cluster.

  @type ipolicy: dict
  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk
    templates

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  restricted_disk_templates = list(set(allowed_disk_templates)
                                   .intersection(set(enabled_disk_templates)))
  ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates


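# InitCluster proceeds in roughly four stages: validate all inputs (enabled
# hypervisors and disk templates, IP addresses, netmask, volume group, DRBD
# helper, MAC prefix, netdev, the various parameter dictionaries), stop any
# running daemons, write the initial configuration and ssconf files, and
# finally restart the daemons and wait for the master daemon to respond.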
def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                               errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  elif constants.HTOOLS:
    # htools was enabled at build-time, we default to it
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


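# The configuration written by InitConfig contains exactly one node (the
# master) and one node group; the instances and networks maps start out
# empty.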
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    diskparams={},
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


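# FinalizeClusterDestroy is deliberately best-effort: each RPC failure is
# only logged as a warning, so that the remaining teardown steps still run.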
def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


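# The payload handed to the node-daemon-setup script identifies the cluster,
# ships the node daemon certificate and the full ssconf contents, and asks
# the script to start ganeti-noded when done.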
def SetupNodeDaemon(opts, cluster_name, node):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param opts: the command line options, providing the debug, verbose
      and ssh_key_check settings
  @param cluster_name: the cluster name
  @param node: the name of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    }

  RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                  opts.debug, opts.verbose,
                  True, opts.ssh_key_check, opts.ssh_key_check, data)

  _WaitForNodeDaemon(node)


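# Master failover in brief: verify this node may take over (it must be a
# master candidate and, unless no_voting is set, a majority of nodes must
# agree on the current master), rewrite the configuration to point at the
# new master, stop the old master's daemons, wait for the master IP to go
# quiet, then start the master daemons locally.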
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
                      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = old_master_node.uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(old_master,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    # (start, factor, limit) backoff: 1s initially, growing 1.5x up to 5s
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


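# GatherMasterVotes returns, for example, [("node1.example.com", 5),
# (None, 2)]: five reachable nodes agree on the master, while two votes were
# lost to unreachable nodes or invalid replies (pooled under the None key).
# The node name here is purely illustrative.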
def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  as we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_names: list
  @param node_names: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_names.remove(myself)
  except ValueError:
    pass
  if not node_names:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.BootstrapRunner().call_master_info(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]
  votes = {}
  for node_name in results:
    nres = results[node_name]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node_name, msg)
      fail = True
    # for now we accept lengths of 3, 4 and 5 (data[3] is the primary ip
    # version and data[4] is the master netmask)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s",
                      node_name, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = list(votes.items())
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list