root / lib / bootstrap.py @ 822a50c4

#
#

# Copyright (C) 2006, 2007, 2008, 2010, 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti import bdev
from ganeti import netutils
from ganeti import backend
from ganeti import luxi
from ganeti import jstore


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: Time (in seconds) within which a daemon must become responsive
_DAEMON_READY_TIMEOUT = 10.0


def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the public key to the
  list of authorized keys and adds the host key to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          spicecert_file=constants.SPICE_CERT_FILE,
                          spicecacert_file=constants.SPICE_CACERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
                           certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)


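# Usage sketch: renewing only the RAPI certificate while keeping every other
# secret in place means setting just that flag, with all file paths left at
# their defaults:
#
#   GenerateClusterCrypto(False, True, False, False, False)
#
# _InitGanetiServerSetup below uses the same calling convention to generate
# only the node certificate.

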
def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


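# Both waiters above follow the same polling pattern: the check callback
# raises utils.RetryAgain() while the daemon is unresponsive, and
# utils.Retry() keeps re-invoking it until _DAEMON_READY_TIMEOUT expires.
# A minimal sketch with a hypothetical probe function:
#
#   def _CheckSomeDaemon():
#     if not _daemon_is_up():  # placeholder for a real liveness check
#       raise utils.RetryAgain()
#   utils.Retry(_CheckSomeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
#
# The second argument is the delay between attempts; MasterFailover below
# passes a tuple instead of a fixed delay to get an increasing back-off.

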
def _InitFileStorage(file_storage_dir):
  """Initialize the file storage directory, if needed.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir


def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version))

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
                                 (master_netmask, primary_ip_version))
  else:
    master_netmask = ipcls.iplen

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  if constants.ENABLE_FILE_STORAGE:
    file_storage_dir = _InitFileStorage(file_storage_dir)
  else:
    file_storage_dir = ""

  if constants.ENABLE_SHARED_FILE_STORAGE:
    shared_file_storage_dir = _InitFileStorage(shared_file_storage_dir)
  else:
    shared_file_storage_dir = ""

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  for key, val in ipolicy.items():
    if key not in constants.IPOLICY_PARAMETERS:
      raise errors.OpPrereqError("'%s' is not a valid key for instance policy"
                                 " description" % key, errors.ECODE_INVAL)
    utils.ForceDictType(val, constants.ISPECS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)
  full_ipolicy = objects.FillDictOfDicts(constants.IPOLICY_DEFAULTS,
                                         ipolicy)
  objects.InstancePolicy.CheckParameterSyntax(full_ipolicy)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)))
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)

  # set up ssh config and /etc/hosts
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  elif constants.HTOOLS:
    # htools was enabled at build-time, we default to it
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=ipolicy,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  backend.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


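# InitCluster runs in three phases: validate all parameters and the local
# network identity, write the initial configuration via InitConfig(), then
# start the daemons and wait for masterd. A call sketch with made-up example
# values (the remaining parameter dicts are elided):
#
#   InitCluster("cluster.example.com", "aa:00:00", None, "eth0",
#               "/srv/ganeti/file-storage", "", 10,
#               enabled_hypervisors=[constants.HT_XEN_PVM], ...)

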
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  nodes = {
    master_node_config.name: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.name],
    diskparams=cluster_config.diskparams,
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


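# The file written above is the serialized form of objects.ConfigData; its
# shape (with values elided) is roughly:
#
#   {
#     "version": version,
#     "cluster": {...},             # cluster_config as a dict
#     "nodegroups": {uuid: {...}},  # the single default node group
#     "nodes": {name: {...}},       # only the master node at this point
#     "instances": {},
#     "serial_no": 1,
#     "ctime": now, "mtime": now,
#   }

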
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = master
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  sshrunner = ssh.SshRunner(cluster_name,
                            ipv6=(family == netutils.IP6Address.family))

  bind_address = constants.IP4_ADDRESS_ANY
  if family == netutils.IP6Address.family:
    bind_address = constants.IP6_ADDRESS_ANY

  # Set up the inter-node password and certificate, restart the node daemon
  # and then connect with ssh to set the password and start ganeti-noded.
  # Note that all the variables below are sanitized at this point, either
  # by being constants or by the checks above.
  sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.SPICE_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.SPICE_CACERT_FILE)
  sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
  mycommand = ("%s stop-all; %s start %s -b %s" %
               (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
                utils.ShellQuote(bind_address)))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)


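# With the default build paths, mycommand above expands to something like
#
#   daemon-util stop-all; daemon-util start ganeti-noded -b 0.0.0.0
#
# (0.0.0.0 for the IPv4 case, :: for IPv6): the new node's daemons are
# cycled so that ganeti-noded comes up using the certificates and HMAC key
# that were just copied over.

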
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes' agreement
      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # If cfg.Update worked, the old master daemon is no longer able to
  # write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.name = old_master
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_params.name,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since, bugs aside, we use the same
  source of configuration information for both backend and bootstrap,
  so we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.BootstrapRunner().call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
    # for now we accept lengths 3, 4 and 5 (data[3] is the primary ip version
    # and data[4] is the master netmask)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name, since we want None
  # sorted later if half of the nodes do not respond and the other
  # half all vote for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
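

# A worked example of the return value: querying five nodes where three
# answer "node1.example.com", one answers "node2.example.com" and one RPC
# fails yields
#
#   [("node1.example.com", 3), ("node2.example.com", 1), (None, 1)]
#
# so vote_list[0][0] is the agreed-upon master (or None when failures
# dominate), which is exactly how MasterFailover consumes it.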