root / lib / bootstrap.py @ ea3a925f


#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import sha
import re
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf

from ganeti.rpc import RpcRunner

def _InitSSHSetup(node):
  """Setup the SSH configuration for the cluster.


  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()
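
# Illustrative usage (the hostname below is hypothetical): InitCluster() calls
# this as _InitSSHSetup(hostname.name), e.g.
#   _InitSSHSetup("node1.example.com")
# which backs up any existing root DSA keypair, regenerates it with
# ssh-keygen, and appends the new public key to root's authorized keys.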


def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster and also generates the SSL certificate.

  """
  # Create pseudo random password
  randpass = utils.GenerateSecret()

  # and write it into the config file
  utils.WriteFile(constants.CLUSTER_PASSWORD_FILE,
                  data="%s\n" % randpass, mode=0400)

  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
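
# For reference, the certificate generation above is equivalent to roughly
# this shell command (SSL_CERT_FILE stands for the configured path):
#   openssl req -new -newkey rsa:1024 -days 1825 -nodes -x509 \
#     -keyout SSL_CERT_FILE -out SSL_CERT_FILE -batch
# i.e. a self-signed certificate valid for five years, with the key and the
# certificate written to the same file, which is then restricted to mode 0400.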


def InitCluster(cluster_name, hypervisor_type, mac_prefix, def_bridge,
                master_netdev, file_storage_dir,
                secondary_ip=None,
                vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None):
  """Initialise the cluster.

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  if hypervisor_type == constants.HT_XEN_HVM:
    if not os.path.exists(constants.VNC_PASSWORD_FILE):
      raise errors.OpPrereqError("Please prepare the cluster VNC"
                                 " password file %s" %
                                 constants.VNC_PASSWORD_FILE)

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  if hypervisor_type not in constants.HYPER_TYPES:
    raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
                               hypervisor_type)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  # set up the inter-node password and certificate
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup(hostname.name)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    hypervisor=hypervisor_type,
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip)

  cfg = InitConfig(constants.CONFIG_VERSION,
                   cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  RpcRunner.call_node_start_master(hostname.name, True)
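
# A sketch of how this might be invoked by cluster-init tooling; all values
# below are hypothetical examples, not defaults:
#   InitCluster("cluster1.example.com", constants.HT_XEN_HVM, "aa:00:00",
#               "xen-br0", "eth0", "/srv/ganeti/file-storage",
#               vg_name="xenvg", beparams={...}, hvparams={...},
#               enabled_hypervisors=[constants.HT_XEN_HVM])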


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: Configuration version
  @type cluster_config: objects.Cluster
  @param cluster_config: Cluster configuration
  @type master_node_config: objects.Node
  @param master_node_config: Master node configuration
  @type cfg_file: string
  @param cfg_file: Configuration file path

  @rtype: ssconf.SimpleConfigWriter
  @return: Initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg
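
# InitCluster() above uses this as:
#   cfg = InitConfig(constants.CONFIG_VERSION, cluster_config,
#                    master_node_config)
# which serialises a one-node, zero-instance configuration and saves it to
# the default cfg_file (constants.CLUSTER_CONF_FILE).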


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  if not RpcRunner.call_node_stop_master(master, True):
    logging.warning("Could not disable the master role")
  if not RpcRunner.call_node_leave_cluster(master):
    logging.warning("Could not shutdown the node daemon and cleanup the node")


def SetupNodeDaemon(node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  Args:
    node: fully qualified domain name for the new node
    ssh_key_check: whether to do strict host key checking (and ask for key
        confirmation) when connecting over ssh

  """
  cfg = ssconf.SimpleConfigReader()
  sshrunner = ssh.SshRunner(cfg.GetClusterName())
  gntpass = utils.GetNodeDaemonPassword()
  if not re.match('^[a-zA-Z0-9.]{1,64}$', gntpass):
    raise errors.OpExecError("ganeti password corruption detected")
  f = open(constants.SSL_CERT_FILE)
  try:
    gntpem = f.read(8192)
  finally:
    f.close()
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search('^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # set up the inter-node password and certificate and restart the node
  # daemon: connect with ssh, write the password and the certificate on the
  # remote node, then restart ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "echo '%s' > '%s' && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (gntpass, constants.CLUSTER_PASSWORD_FILE,
                constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  return 0
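
# For illustration, the remote command built above expands to roughly the
# following (angle-bracket names stand for the corresponding values):
#   umask 077 && echo '<password>' > '<CLUSTER_PASSWORD_FILE>' &&
#   cat > '<SSL_CERT_FILE>' << '!EOF.' &&
#   <PEM contents>!EOF.
#   <NODE_INITD_SCRIPT> restart
# i.e. the password and the SSL certificate are written on the new node and
# its node daemon is restarted, all in a single ssh invocation as root.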


def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  the new master.

  """
  cfg = ssconf.SimpleConfigWriter()

  new_master = utils.HostInfo().name
  old_master = cfg.GetMasterNode()
  node_list = cfg.GetNodeList()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("I have wrong configuration, I believe the"
                                 " master is %s but the other nodes voted for"
                                 " %s. Please resync the configuration of"
                                 " this node." % (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  if not RpcRunner.call_node_stop_master(old_master, True):
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  cfg.SetMasterNode(new_master)
  cfg.Save()

  # Here we have a phase where no master should be running

  if not RpcRunner.call_upload_file(cfg.GetNodeList(),
                                    constants.CLUSTER_CONF_FILE):
    logging.error("Could not distribute the new configuration"
                  " to the other nodes, please check.")

  if not RpcRunner.call_node_start_master(new_master, True):
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside),
  as we use the same source of configuration information for both
  backend and bootstrap, we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (possibly after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  positive = negative = 0
  other_masters = {}
  votes = {}
  for node in results:
    if not isinstance(results[node], (tuple, list)) or len(results[node]) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = results[node][2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
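
# Example of the returned structure (node names are hypothetical): with five
# queried nodes, three voting for node2, one voting for node1 and one failing
# to answer, the result would be:
#   [("node2.example.com", 3), ("node1.example.com", 1), (None, 1)]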