Statistics
| Branch: | Tag: | Revision:

root / lib / bootstrap.py @ 87bc7ca8

History | View | Annotate | Download (28.7 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008, 2010, 2011 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Functions to bootstrap a new cluster.
23

24
"""
25

    
26
import os
27
import os.path
28
import re
29
import logging
30
import time
31

    
32
from ganeti import rpc
33
from ganeti import ssh
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import config
37
from ganeti import constants
38
from ganeti import objects
39
from ganeti import ssconf
40
from ganeti import serializer
41
from ganeti import hypervisor
42
from ganeti import bdev
43
from ganeti import netutils
44
from ganeti import backend
45
from ganeti import luxi
46

    
47

    
48
# ec_id for InitConfig's temporary reservation manager
49
_INITCONF_ECID = "initconfig-ecid"
50

    
51
#: After how many seconds daemon must be responsive
52
_DAEMON_READY_TIMEOUT = 10.0
53

    
54

    
55
def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  @raise errors.OpExecError: if the keypair cannot be generated

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Preserve any pre-existing key material before regenerating it
  for key_path in (priv_key, pub_key):
    if os.path.exists(key_path):
      utils.CreateBackup(key_path)
    utils.RemoveFile(key_path)

  keygen = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if keygen.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             keygen.output)

  # Allow the new key to log into this very node
  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
77

    
78

    
79
def GenerateHmacKey(file_name):
80
  """Writes a new HMAC key.
81

82
  @type file_name: str
83
  @param file_name: Path to output file
84

85
  """
86
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
87
                  backup=True)
88

    
89

    
90
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
                          new_cds, rapi_cert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  Each artifact is (re)generated either when explicitly requested via
  the corresponding boolean flag or when it does not exist on disk yet.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  # RAPI
  rapi_cert_exists = os.path.exists(rapicert_file)

  # An explicitly-passed PEM takes precedence over (re)generation
  if rapi_cert_pem:
    # Assume rapi_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

  # Cluster domain secret
  # As with RAPI, an explicitly-passed secret wins over generation
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
155

    
156

    
157
def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node
  @raise errors.OpExecError: if the node daemon cannot be started

  """
  # Only the cluster certificate needs (re)generation at this stage
  GenerateClusterCrypto(True, False, False, False)

  start_cmd = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if start_cmd.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (start_cmd.cmd, start_cmd.exit_code,
                              start_cmd.output))

  # Block until noded answers RPC queries
  _WaitForNodeDaemon(master_name)
177

    
178

    
179
def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  @type node_name: str
  @param node_name: node whose daemon should be polled
  @raise errors.OpExecError: if the daemon stays unresponsive

  """
  def _Poll():
    # A failing version RPC means noded is not (yet) reachable
    if rpc.RpcRunner.call_version([node_name])[node_name].fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_Poll, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))
193

    
194

    
195
def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  @raise errors.OpExecError: if masterd stays unresponsive

  """
  def _Poll():
    # Any luxi failure (socket missing, daemon still starting, ...)
    # simply means "try again"
    try:
      (cluster_name, ) = luxi.Client().QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_Poll, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)
213

    
214

    
215
def _InitFileStorage(file_storage_dir):
216
  """Initialize if needed the file storage.
217

218
  @param file_storage_dir: the user-supplied value
219
  @return: either empty string (if file storage was disabled at build
220
      time) or the normalized path to the storage directory
221

222
  """
223
  if not constants.ENABLE_FILE_STORAGE:
224
    return ""
225

    
226
  file_storage_dir = os.path.normpath(file_storage_dir)
227

    
228
  if not os.path.isabs(file_storage_dir):
229
    raise errors.OpPrereqError("The file storage directory you passed is"
230
                               " not an absolute path.", errors.ECODE_INVAL)
231

    
232
  if not os.path.exists(file_storage_dir):
233
    try:
234
      os.makedirs(file_storage_dir, 0750)
235
    except OSError, err:
236
      raise errors.OpPrereqError("Cannot create file storage directory"
237
                                 " '%s': %s" % (file_storage_dir, err),
238
                                 errors.ECODE_ENVIRON)
239

    
240
  if not os.path.isdir(file_storage_dir):
241
    raise errors.OpPrereqError("The file storage directory '%s' is not"
242
                               " a directory." % file_storage_dir,
243
                               errors.ECODE_ENVIRON)
244
  return file_storage_dir
245

    
246

    
247
def _InitSharedFileStorage(shared_file_storage_dir):
248
  """Initialize if needed the shared file storage.
249

250
  @param shared_file_storage_dir: the user-supplied value
251
  @return: either empty string (if file storage was disabled at build
252
      time) or the normalized path to the storage directory
253

254
  """
255
  if not constants.ENABLE_SHARED_FILE_STORAGE:
256
    return ""
257

    
258
  shared_file_storage_dir = os.path.normpath(shared_file_storage_dir)
259

    
260
  if not os.path.isabs(shared_file_storage_dir):
261
    raise errors.OpPrereqError("The shared file storage directory you"
262
                               " passed is not an absolute path.",
263
                               errors.ECODE_INVAL)
264

    
265
  if not os.path.exists(shared_file_storage_dir):
266
    try:
267
      os.makedirs(shared_file_storage_dir, 0750)
268
    except OSError, err:
269
      raise errors.OpPrereqError("Cannot create file storage directory"
270
                                 " '%s': %s" % (shared_file_storage_dir, err),
271
                                 errors.ECODE_ENVIRON)
272

    
273
  if not os.path.isdir(shared_file_storage_dir):
274
    raise errors.OpPrereqError("The file storage directory '%s' is not"
275
                               " a directory." % shared_file_storage_dir,
276
                               errors.ECODE_ENVIRON)
277
  return shared_file_storage_dir
278

    
279

    
280
def InitCluster(cluster_name, mac_prefix, # pylint: disable-msg=R0913
281
                master_netdev, file_storage_dir, shared_file_storage_dir,
282
                candidate_pool_size, secondary_ip=None, vg_name=None,
283
                beparams=None, nicparams=None, ndparams=None, hvparams=None,
284
                enabled_hypervisors=None, modify_etc_hosts=True,
285
                modify_ssh_setup=True, maintain_node_health=False,
286
                drbd_helper=None, uid_pool=None, default_iallocator=None,
287
                primary_ip_version=None, prealloc_wipe_disks=False):
288
  """Initialise the cluster.
289

290
  @type candidate_pool_size: int
291
  @param candidate_pool_size: master candidate pool size
292

293
  """
294
  # TODO: complete the docstring
295
  if config.ConfigWriter.IsCluster():
296
    raise errors.OpPrereqError("Cluster is already initialised",
297
                               errors.ECODE_STATE)
298

    
299
  if not enabled_hypervisors:
300
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
301
                               " least one member", errors.ECODE_INVAL)
302
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
303
  if invalid_hvs:
304
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
305
                               " entries: %s" % invalid_hvs,
306
                               errors.ECODE_INVAL)
307

    
308

    
309
  ipcls = None
310
  if primary_ip_version == constants.IP4_VERSION:
311
    ipcls = netutils.IP4Address
312
  elif primary_ip_version == constants.IP6_VERSION:
313
    ipcls = netutils.IP6Address
314
  else:
315
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
316
                               primary_ip_version)
317

    
318
  hostname = netutils.GetHostname(family=ipcls.family)
319
  if not ipcls.IsValid(hostname.ip):
320
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
321
                               " address." % (hostname.ip, primary_ip_version))
322

    
323
  if ipcls.IsLoopback(hostname.ip):
324
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
325
                               " address. Please fix DNS or %s." %
326
                               (hostname.ip, constants.ETC_HOSTS),
327
                               errors.ECODE_ENVIRON)
328

    
329
  if not ipcls.Own(hostname.ip):
330
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
331
                               " to %s,\nbut this ip address does not"
332
                               " belong to this host" %
333
                               hostname.ip, errors.ECODE_ENVIRON)
334

    
335
  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)
336

    
337
  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
338
    raise errors.OpPrereqError("Cluster IP already active",
339
                               errors.ECODE_NOTUNIQUE)
340

    
341
  if not secondary_ip:
342
    if primary_ip_version == constants.IP6_VERSION:
343
      raise errors.OpPrereqError("When using a IPv6 primary address, a valid"
344
                                 " IPv4 address must be given as secondary",
345
                                 errors.ECODE_INVAL)
346
    secondary_ip = hostname.ip
347

    
348
  if not netutils.IP4Address.IsValid(secondary_ip):
349
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
350
                               " IPv4 address." % secondary_ip,
351
                               errors.ECODE_INVAL)
352

    
353
  if not netutils.IP4Address.Own(secondary_ip):
354
    raise errors.OpPrereqError("You gave %s as secondary IP,"
355
                               " but it does not belong to this host." %
356
                               secondary_ip, errors.ECODE_ENVIRON)
357

    
358
  if vg_name is not None:
359
    # Check if volume group is valid
360
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
361
                                          constants.MIN_VG_SIZE)
362
    if vgstatus:
363
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
364
                                 " you are not using lvm" % vgstatus,
365
                                 errors.ECODE_INVAL)
366

    
367
  if drbd_helper is not None:
368
    try:
369
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
370
    except errors.BlockDeviceError, err:
371
      raise errors.OpPrereqError("Error while checking drbd helper"
372
                                 " (specify --no-drbd-storage if you are not"
373
                                 " using drbd): %s" % str(err),
374
                                 errors.ECODE_ENVIRON)
375
    if drbd_helper != curr_helper:
376
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
377
                                 " is the current helper" % (drbd_helper,
378
                                                             curr_helper),
379
                                 errors.ECODE_INVAL)
380

    
381
  file_storage_dir = _InitFileStorage(file_storage_dir)
382
  shared_file_storage_dir = _InitSharedFileStorage(shared_file_storage_dir)
383

    
384
  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
385
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
386
                               errors.ECODE_INVAL)
387

    
388
  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
389
  if result.failed:
390
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
391
                               (master_netdev,
392
                                result.output.strip()), errors.ECODE_INVAL)
393

    
394
  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
395
  utils.EnsureDirs(dirs)
396

    
397
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
398
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
399
  objects.NIC.CheckParameterSyntax(nicparams)
400

    
401
  if ndparams is not None:
402
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
403
  else:
404
    ndparams = dict(constants.NDC_DEFAULTS)
405

    
406
  # hvparams is a mapping of hypervisor->hvparams dict
407
  for hv_name, hv_params in hvparams.iteritems():
408
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
409
    hv_class = hypervisor.GetHypervisor(hv_name)
410
    hv_class.CheckParameterSyntax(hv_params)
411

    
412
  # set up ssh config and /etc/hosts
413
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
414
  sshkey = sshline.split(" ")[1]
415

    
416
  if modify_etc_hosts:
417
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)
418

    
419
  if modify_ssh_setup:
420
    _InitSSHSetup()
421

    
422
  if default_iallocator is not None:
423
    alloc_script = utils.FindFile(default_iallocator,
424
                                  constants.IALLOCATOR_SEARCH_PATH,
425
                                  os.path.isfile)
426
    if alloc_script is None:
427
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
428
                                 " specified" % default_iallocator,
429
                                 errors.ECODE_INVAL)
430

    
431
  now = time.time()
432

    
433
  # init of cluster config file
434
  cluster_config = objects.Cluster(
435
    serial_no=1,
436
    rsahostkeypub=sshkey,
437
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
438
    mac_prefix=mac_prefix,
439
    volume_group_name=vg_name,
440
    tcpudp_port_pool=set(),
441
    master_node=hostname.name,
442
    master_ip=clustername.ip,
443
    master_netdev=master_netdev,
444
    cluster_name=clustername.name,
445
    file_storage_dir=file_storage_dir,
446
    shared_file_storage_dir=shared_file_storage_dir,
447
    enabled_hypervisors=enabled_hypervisors,
448
    beparams={constants.PP_DEFAULT: beparams},
449
    nicparams={constants.PP_DEFAULT: nicparams},
450
    ndparams=ndparams,
451
    hvparams=hvparams,
452
    candidate_pool_size=candidate_pool_size,
453
    modify_etc_hosts=modify_etc_hosts,
454
    modify_ssh_setup=modify_ssh_setup,
455
    uid_pool=uid_pool,
456
    ctime=now,
457
    mtime=now,
458
    maintain_node_health=maintain_node_health,
459
    drbd_usermode_helper=drbd_helper,
460
    default_iallocator=default_iallocator,
461
    primary_ip_family=ipcls.family,
462
    prealloc_wipe_disks=prealloc_wipe_disks,
463
    )
464
  master_node_config = objects.Node(name=hostname.name,
465
                                    primary_ip=hostname.ip,
466
                                    secondary_ip=secondary_ip,
467
                                    serial_no=1,
468
                                    master_candidate=True,
469
                                    offline=False, drained=False,
470
                                    ctime=now, mtime=now,
471
                                    )
472
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
473
  cfg = config.ConfigWriter(offline=True)
474
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
475
  cfg.Update(cfg.GetClusterInfo(), logging.error)
476
  backend.WriteSsconfFiles(cfg.GetSsconfValues())
477

    
478
  # set up the inter-node password and certificate
479
  _InitGanetiServerSetup(hostname.name)
480

    
481
  logging.debug("Starting daemons")
482
  result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
483
  if result.failed:
484
    raise errors.OpExecError("Could not start daemons, command %s"
485
                             " had exitcode %s and error %s" %
486
                             (result.cmd, result.exit_code, result.output))
487

    
488
  _WaitForMasterDaemon()
489

    
490

    
491
def InitConfig(version, cluster_config, master_node_config,
492
               cfg_file=constants.CLUSTER_CONF_FILE):
493
  """Create the initial cluster configuration.
494

495
  It will contain the current node, which will also be the master
496
  node, and no instances.
497

498
  @type version: int
499
  @param version: configuration version
500
  @type cluster_config: L{objects.Cluster}
501
  @param cluster_config: cluster configuration
502
  @type master_node_config: L{objects.Node}
503
  @param master_node_config: master node configuration
504
  @type cfg_file: string
505
  @param cfg_file: configuration file path
506

507
  """
508
  uuid_generator = config.TemporaryReservationManager()
509
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
510
                                                _INITCONF_ECID)
511
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
512
                                                    _INITCONF_ECID)
513
  nodes = {
514
    master_node_config.name: master_node_config,
515
    }
516
  default_nodegroup = objects.NodeGroup(
517
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
518
    name=constants.INITIAL_NODE_GROUP_NAME,
519
    members=[master_node_config.name],
520
    )
521
  nodegroups = {
522
    default_nodegroup.uuid: default_nodegroup,
523
    }
524
  now = time.time()
525
  config_data = objects.ConfigData(version=version,
526
                                   cluster=cluster_config,
527
                                   nodegroups=nodegroups,
528
                                   nodes=nodes,
529
                                   instances={},
530
                                   serial_no=1,
531
                                   ctime=now, mtime=now)
532
  utils.WriteFile(cfg_file,
533
                  data=serializer.Dump(config_data.ToDict()),
534
                  mode=0600)
535

    
536

    
537
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup

  # Demote the master first, then make the node leave the cluster;
  # both failures are best-effort (only logged)
  stop_result = rpc.RpcRunner.call_node_stop_master(master, True)
  if stop_result.fail_msg:
    logging.warning("Could not disable the master role: %s",
                    stop_result.fail_msg)

  leave_result = rpc.RpcRunner.call_node_leave_cluster(master,
                                                       modify_ssh_setup)
  if leave_result.fail_msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", leave_result.fail_msg)
555

    
556

    
557
def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  use_ipv6 = (family == netutils.IP6Address.family)
  sshrunner = ssh.SshRunner(cluster_name, ipv6=use_ipv6)

  if use_ipv6:
    bind_address = constants.IP6_ADDRESS_ANY
  else:
    bind_address = constants.IP4_ADDRESS_ANY

  # set up inter-node password and certificate and restarts the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  for secret_file in (constants.NODED_CERT_FILE,
                      constants.RAPI_CERT_FILE,
                      constants.CONFD_HMAC_KEY):
    sshrunner.CopyFileToNode(node, secret_file)

  mycommand = ("%s stop-all; %s start %s -b %s" %
               (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
                utils.ShellQuote(bind_address)))

  result = sshrunner.Run(node, "root", mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  # The remote daemon must answer RPC before we hand over to the opcode
  _WaitForNodeDaemon(node)
598

    
599

    
600
def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
                      (dangerous)
  @rtype: int
  @return: 0 on success, 1 if any step of the takeover failed

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  # NOTE(review): the message below has a pre-existing typo
  # ("This commands"); left untouched here as it is runtime output
  if old_master == new_master:
    raise errors.OpPrereqError("This commands must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)),
                               errors.ECODE_STATE)

  # Unless forced, make sure the rest of the cluster agrees on who the
  # current master is before taking the role away from it
  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write(); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  # Best-effort: a failure here is logged but does not abort the failover
  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                 " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30
  # Here we have a phase where no master should be running
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  logging.info("Starting the master daemons on the new master")

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode
709

    
710

    
711
def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  # Only the master half of the (master, myself) pair is needed here
  master, _ = ssconf.GetMasterAndMyself(ssconf.SimpleStore())
  return master
725

    
726

    
727
def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside)
  since we use the same source for configuration information for both
  backend and boostrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list (the list is modified
      in place)
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]

  votes = {}
  for node, nres in results.items():
    data = nres.payload
    msg = nres.fail_msg
    if msg:
      # RPC-level failure: counted under the None key
      logging.warning("Error contacting node %s: %s", node, msg)
      master_node = None
    # for now we accept both length 3 and 4 (data[3] is primary ip version)
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      master_node = None
    else:
      master_node = data[2]
    # FIX: single counting path instead of duplicated fail/vote blocks;
    # also drops the pointless "[v for v in votes.items()]" copy below
    votes[master_node] = votes.get(master_node, 0) + 1

  # sort first on number of votes then on name, since we want None
  # sorted later if we have the half of the nodes not responding, and
  # half voting all for the same master
  vote_list = sorted(votes.items(), key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list