Statistics
| Branch: | Tag: | Revision:

root / lib / bootstrap.py @ 47a6db9b

History | View | Annotate | Download (28.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Functions to bootstrap a new cluster.
23

24
"""
25

    
26
import os
27
import os.path
28
import re
29
import logging
30
import time
31

    
32
from ganeti import rpc
33
from ganeti import ssh
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import config
37
from ganeti import constants
38
from ganeti import objects
39
from ganeti import ssconf
40
from ganeti import serializer
41
from ganeti import hypervisor
42
from ganeti import bdev
43
from ganeti import netutils
44
from ganeti import backend
45
from ganeti import luxi
46

    
47

    
48
# ec_id for InitConfig's temporary reservation manager
49
_INITCONF_ECID = "initconfig-ecid"
50

    
51
#: After how many seconds daemon must be responsive
52
_DAEMON_READY_TIMEOUT = 10.0
53

    
54

    
55
def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
  (priv_key, pub_key, auth_keys) = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Preserve any pre-existing key material before wiping it
  for key_path in (priv_key, pub_key):
    if os.path.exists(key_path):
      utils.CreateBackup(key_path)
    utils.RemoveFile(key_path)

  keygen_cmd = ["ssh-keygen", "-t", "dsa", "-f", priv_key, "-q", "-N", ""]
  result = utils.RunCmd(keygen_cmd)
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  # Allow root to log into itself using the freshly generated key
  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
77

    
78

    
79
def GenerateHmacKey(file_name):
80
  """Writes a new HMAC key.
81

82
  @type file_name: str
83
  @param file_name: Path to output file
84

85
  """
86
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
87
                  backup=True)
88

    
89

    
90
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
                          new_cds, rapi_cert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  Each artifact is (re)created when explicitly requested via the
  corresponding boolean, or when it is missing on disk.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  have_cluster_cert = os.path.exists(nodecert_file)
  if new_cluster_cert or not have_cluster_cert:
    if have_cluster_cert:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI certificate: an explicitly supplied PEM takes precedence over
  # regenerating one ourselves
  have_rapi_cert = os.path.exists(rapicert_file)

  if rapi_cert_pem:
    # Assume rapi_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not have_rapi_cert:
    if have_rapi_cert:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # Cluster domain secret: likewise, a supplied value wins
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
155

    
156

    
157
def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Regenerate only the cluster certificate; leave RAPI cert, HMAC key
  # and cluster domain secret alone (created on demand if missing)
  GenerateClusterCrypto(True, False, False, False)

  start_cmd = [constants.DAEMON_UTIL, "start", constants.NODED]
  result = utils.RunCmd(start_cmd)
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  # Block until noded answers RPC queries, or raise on timeout
  _WaitForNodeDaemon(master_name)
177

    
178

    
179
def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  @type node_name: str
  @param node_name: name of the node to poll

  """
  def _Ping():
    # A failed "version" RPC means the daemon is not reachable yet
    node_result = rpc.RpcRunner.call_version([node_name])[node_name]
    if node_result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_Ping, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))
193

    
194

    
195
def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _QueryMaster():
    # Any failure here (no socket, no answer) just means "not ready yet"
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_QueryMaster, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)
213

    
214

    
215
def _InitFileStorage(file_storage_dir):
216
  """Initialize if needed the file storage.
217

218
  @param file_storage_dir: the user-supplied value
219
  @return: either empty string (if file storage was disabled at build
220
      time) or the normalized path to the storage directory
221

222
  """
223
  if not constants.ENABLE_FILE_STORAGE:
224
    return ""
225

    
226
  file_storage_dir = os.path.normpath(file_storage_dir)
227

    
228
  if not os.path.isabs(file_storage_dir):
229
    raise errors.OpPrereqError("The file storage directory you passed is"
230
                               " not an absolute path.", errors.ECODE_INVAL)
231

    
232
  if not os.path.exists(file_storage_dir):
233
    try:
234
      os.makedirs(file_storage_dir, 0750)
235
    except OSError, err:
236
      raise errors.OpPrereqError("Cannot create file storage directory"
237
                                 " '%s': %s" % (file_storage_dir, err),
238
                                 errors.ECODE_ENVIRON)
239

    
240
  if not os.path.isdir(file_storage_dir):
241
    raise errors.OpPrereqError("The file storage directory '%s' is not"
242
                               " a directory." % file_storage_dir,
243
                               errors.ECODE_ENVIRON)
244
  return file_storage_dir
245

    
246

    
247
def _InitSharedFileStorage(shared_file_storage_dir):
248
  """Initialize if needed the shared file storage.
249

250
  @param shared_file_storage_dir: the user-supplied value
251
  @return: either empty string (if file storage was disabled at build
252
      time) or the normalized path to the storage directory
253

254
  """
255
  if not constants.ENABLE_SHARED_FILE_STORAGE:
256
    return ""
257

    
258
  shared_file_storage_dir = os.path.normpath(shared_file_storage_dir)
259

    
260
  if not os.path.isabs(shared_file_storage_dir):
261
    raise errors.OpPrereqError("The shared file storage directory you"
262
                               " passed is not an absolute path.",
263
                               errors.ECODE_INVAL)
264

    
265
  if not os.path.exists(shared_file_storage_dir):
266
    try:
267
      os.makedirs(shared_file_storage_dir, 0750)
268
    except OSError, err:
269
      raise errors.OpPrereqError("Cannot create file storage directory"
270
                                 " '%s': %s" % (shared_file_storage_dir, err),
271
                                 errors.ECODE_ENVIRON)
272

    
273
  if not os.path.isdir(shared_file_storage_dir):
274
    raise errors.OpPrereqError("The file storage directory '%s' is not"
275
                               " a directory." % shared_file_storage_dir,
276
                               errors.ECODE_ENVIRON)
277
  return shared_file_storage_dir
278

    
279

    
280
def InitCluster(cluster_name, mac_prefix, # pylint: disable-msg=R0913
                master_netdev, file_storage_dir, shared_file_storage_dir,
                candidate_pool_size, secondary_ip=None, vg_name=None,
                beparams=None, nicparams=None, ndparams=None, hvparams=None,
                enabled_hypervisors=None, modify_etc_hosts=True,
                modify_ssh_setup=True, maintain_node_health=False,
                drbd_helper=None, uid_pool=None, default_iallocator=None,
                primary_ip_version=None, prealloc_wipe_disks=False):
  """Initialise the cluster.

  Validates all user-supplied parameters, writes the initial cluster
  configuration and starts the daemons on the current node, which
  becomes the master.

  @type cluster_name: str
  @param cluster_name: name of the new cluster; must resolve via DNS
  @type mac_prefix: str
  @param mac_prefix: MAC address prefix in "xx:xx:xx" form
  @type master_netdev: str
  @param master_netdev: network device on which the master IP is set up
  @type file_storage_dir: str
  @param file_storage_dir: directory for file-based instance storage
  @type shared_file_storage_dir: str
  @param shared_file_storage_dir: directory for shared file-based storage
  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size
  @type secondary_ip: str
  @param secondary_ip: optional secondary (IPv4) address of this node;
      defaults to the primary IP (only valid with an IPv4 primary)
  @type vg_name: str
  @param vg_name: optional LVM volume group name (None disables LVM checks)
  @type beparams: dict
  @param beparams: cluster-wide backend parameter defaults
  @type nicparams: dict
  @param nicparams: cluster-wide NIC parameter defaults
  @type ndparams: dict
  @param ndparams: node parameter defaults (None selects built-in defaults)
  @type hvparams: dict
  @param hvparams: mapping of hypervisor name to its parameter dict
  @type enabled_hypervisors: list
  @param enabled_hypervisors: non-empty list of hypervisors to enable
  @type modify_etc_hosts: bool
  @param modify_etc_hosts: whether to add this host to /etc/hosts
  @type modify_ssh_setup: bool
  @param modify_ssh_setup: whether to (re)generate root's SSH keys
  @type maintain_node_health: bool
  @param maintain_node_health: cluster-wide node health maintenance flag
  @type drbd_helper: str
  @param drbd_helper: required DRBD usermode helper (None skips the check)
  @param uid_pool: user-id pool for instances; presumably a list of
      ranges — TODO confirm against objects.Cluster
  @type default_iallocator: str
  @param default_iallocator: name of the default instance allocator script
  @type primary_ip_version: int
  @param primary_ip_version: cluster IP version (4 or 6)
  @type prealloc_wipe_disks: bool
  @param prealloc_wipe_disks: whether to wipe disks before allocation
  @raise errors.OpPrereqError: if any parameter fails validation or the
      cluster already exists
  @raise errors.OpExecError: if starting the daemons fails

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  # At least one hypervisor must be enabled, and all of them known
  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  # Select the address class matching the requested IP version
  ipcls = None
  if primary_ip_version == constants.IP4_VERSION:
    ipcls = netutils.IP4Address
  elif primary_ip_version == constants.IP6_VERSION:
    ipcls = netutils.IP6Address
  else:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version)

  # This host's resolved IP must be valid, non-loopback and actually
  # configured on one of its interfaces
  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version))

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  # The cluster IP must be free: nothing should answer on the noded port
  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  # The secondary IP is always IPv4; with an IPv6 primary it must be
  # given explicitly, otherwise it defaults to the primary IP
  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  # The locally installed DRBD helper must match the requested one
  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  # Normalize/create the storage directories (empty string if disabled)
  file_storage_dir = _InitFileStorage(file_storage_dir)
  shared_file_storage_dir = _InitSharedFileStorage(shared_file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  # The master netdev must exist on this node
  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  # Validate parameter dictionaries against their declared types
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # set up ssh config and /etc/hosts
  # The host RSA public key becomes the cluster-wide known-hosts key;
  # field 1 of the key file is the base64 key material
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  # Write the initial config, then re-read it through a real (offline)
  # ConfigWriter to generate the derived files (known_hosts, ssconf)
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  backend.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()
489

    
490

    
491
def InitConfig(version, cluster_config, master_node_config,
492
               cfg_file=constants.CLUSTER_CONF_FILE):
493
  """Create the initial cluster configuration.
494

495
  It will contain the current node, which will also be the master
496
  node, and no instances.
497

498
  @type version: int
499
  @param version: configuration version
500
  @type cluster_config: L{objects.Cluster}
501
  @param cluster_config: cluster configuration
502
  @type master_node_config: L{objects.Node}
503
  @param master_node_config: master node configuration
504
  @type cfg_file: string
505
  @param cfg_file: configuration file path
506

507
  """
508
  uuid_generator = config.TemporaryReservationManager()
509
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
510
                                                _INITCONF_ECID)
511
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
512
                                                    _INITCONF_ECID)
513
  nodes = {
514
    master_node_config.name: master_node_config,
515
    }
516
  default_nodegroup = objects.NodeGroup(
517
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
518
    name=constants.INITIAL_NODE_GROUP_NAME,
519
    members=[master_node_config.name],
520
    )
521
  nodegroups = {
522
    default_nodegroup.uuid: default_nodegroup,
523
    }
524
  now = time.time()
525
  config_data = objects.ConfigData(version=version,
526
                                   cluster=cluster_config,
527
                                   nodegroups=nodegroups,
528
                                   nodes=nodes,
529
                                   instances={},
530
                                   networks={},
531
                                   serial_no=1,
532
                                   ctime=now, mtime=now)
533
  utils.WriteFile(cfg_file,
534
                  data=serializer.Dump(config_data.ToDict()),
535
                  mode=0600)
536

    
537

    
538
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup

  # Best effort from here on: log problems but keep going, since the
  # cluster is being torn down anyway
  stop_result = rpc.RpcRunner.call_node_stop_master(master, True)
  if stop_result.fail_msg:
    logging.warning("Could not disable the master role: %s",
                    stop_result.fail_msg)

  leave_result = rpc.RpcRunner.call_node_leave_cluster(master,
                                                       modify_ssh_setup)
  if leave_result.fail_msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", leave_result.fail_msg)
556

    
557

    
558
def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  use_ipv6 = (family == netutils.IP6Address.family)
  sshrunner = ssh.SshRunner(cluster_name, ipv6=use_ipv6)

  if use_ipv6:
    bind_address = constants.IP6_ADDRESS_ANY
  else:
    bind_address = constants.IP4_ADDRESS_ANY

  # set up inter-node password and certificate and restarts the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  for secret_file in (constants.NODED_CERT_FILE,
                      constants.RAPI_CERT_FILE,
                      constants.CONFD_HMAC_KEY):
    sshrunner.CopyFileToNode(node, secret_file)

  restart_command = ("%s stop-all; %s start %s -b %s" %
                     (constants.DAEMON_UTIL, constants.DAEMON_UTIL,
                      constants.NODED, utils.ShellQuote(bind_address)))

  result = sshrunner.Run(node, 'root', restart_command, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  # Make sure noded is actually answering before returning
  _WaitForNodeDaemon(node)
599

    
600

    
601
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
                      (dangerous)
  @rtype: int
  @return: 0 on success, 1 if some step failed (config update or
      starting the master role on the new master)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  # This command is run on the *future* master, so "myself" must differ
  # from the currently recorded master
  if old_master == new_master:
    raise errors.OpPrereqError("This commands must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  # Unless overridden, the rest of the cluster must agree on who the
  # current master is before we take the role over
  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        # A None top vote means the majority of nodes were unreachable
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write(); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    # Not fatal: the admin can still disable the role manually
    logging.error("Could not disable the master role on the old master"
                 " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30
  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    # Proceed anyway; taking over the master IP will likely fail and be
    # reported below
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  logging.info("Starting the master daemons on the new master")

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode
710

    
711

    
712
def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()
  # Only the recorded master matters here; our own name is discarded
  (master, _) = ssconf.GetMasterAndMyself(sstore)
  return master
726

    
727

    
728
def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside)
  since we use the same source for configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]

  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg

    # Communication failures and malformed payloads both vote for None
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      master_node = None
    # for now we accept both length 3 and 4 (data[3] is primary ip version)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      master_node = None
    else:
      master_node = data[2]

    votes[master_node] = votes.get(master_node, 0) + 1

  vote_list = list(votes.items())
  # sort first on number of votes then on name, since we want None
  # sorted later if we have the half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list