#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import sha
import re
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf


def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, backs up any pre-existing key
  files and adds the public key to the authorized keys file.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)
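
  # Generate a fresh passphrase-less keypair ('-N ""') so that cluster
  # operations can ssh between nodes without user interaction.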
  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This generates the cluster SSL certificate and restarts the node
  daemon.

  """
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the loopback"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-f]{2}:[0-9a-f]{2}:[0-9a-f]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  utils.CheckBEParams(beparams)

  # set up the inter-node certificate
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
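  # an OpenSSH public key line has the form "<type> <base64-key> [comment]",
  # so field 1 is the key body itself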
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup(hostname.name)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1)

  cfg = InitConfig(constants.CONFIG_VERSION,
                   cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: Configuration version
  @type cluster_config: objects.Cluster
  @param cluster_config: Cluster configuration
  @type master_node_config: objects.Node
  @param master_node_config: Master node configuration
  @type cfg_file: string
  @param cfg_file: Configuration file path

  @rtype: ssconf.SimpleConfigWriter
  @return: Initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  if result.failed or not result.data:
    logging.warning("Could not disable the master role")
  result = rpc.RpcRunner.call_node_leave_cluster(master)
  if result.failed or not result.data:
    logging.warning("Could not shutdown the node daemon and cleanup the node")


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)
  gntpem = utils.ReadFile(constants.SSL_CERT_FILE)
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search(r'^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # copy the SSL certificate to the new node and restart the node
  # daemon there over ssh; note that all the variables below are
  # sanitized at this point, either by being constants or by the
  # checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))
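
  # For illustration only (the real values come from the constants
  # above), the generated remote command looks roughly like:
  #   umask 077 && cat > '/path/to/server.pem' << '!EOF.' &&
  #   <PEM data>!EOF.
  #   /path/to/node-initd-script restart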
  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))


def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, causes the current
  master to cease being master, and makes this node the new master.

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)))

  vote_list = GatherMasterVotes(node_list)
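
  # vote_list is sorted by descending vote count; entry 0 is therefore
  # the majority opinion, and a node name of None stands for nodes that
  # did not respond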
  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("This node has a wrong configuration: it"
                                 " believes the master is %s but the other"
                                 " nodes voted for %s. Please resync the"
                                 " configuration of this node." %
                                 (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  if result.failed or not result.data:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info)

  result = rpc.RpcRunner.call_node_start_master(new_master, True)
  if result.failed or not result.data:
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside) we
  use the same source of configuration information for both backend and
  bootstrap, so we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.data
    if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1
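
  # Illustrative example: with five queried nodes where three vote for
  # node1.example.com, one votes for node2.example.com and one fails to
  # answer, the tally would yield
  # [('node1.example.com', 3), ('node2.example.com', 1), (None, 1)]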
  vote_list = votes.items()
  # sort first on number of votes then on name, so that in a tie a real
  # node name wins over None (e.g. when half of the nodes do not
  # respond and the other half all vote for the same master)
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list