lib/bootstrap.py @ b9222f32

#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import sha
import re
import logging

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf

def _InitSSHSetup(node):
  """Set up the SSH configuration for the cluster.

  This generates a DSA keypair for root, adds the public key to the
  permitted hosts and adds the hostkey to its own known hosts.

  Args:
    node: the name of this host as a fqdn

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()
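
# Usage sketch (not part of the original module; the hostname is an
# invented example). A caller resolves the local FQDN and then:
#
#   hostname = utils.HostInfo()
#   _InitSSHSetup(hostname.name)    # e.g. "node1.example.com"
#
# Afterwards root has a fresh DSA keypair and the matching public key
# is present in root's authorized_keys file.
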
def _InitGanetiServerSetup():
  """Set up the necessary configuration for the initial node daemon.

  This generates the SSL certificate for the node daemon and restarts
  the daemon so that the new certificate is picked up.

  """
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                         "-days", str(365*5), "-nodes", "-x509",
                         "-keyout", constants.SSL_CERT_FILE,
                         "-out", constants.SSL_CERT_FILE, "-batch"])
  if result.failed:
    raise errors.OpExecError("could not generate server ssl cert, command"
                             " %s had exitcode %s and error message %s" %
                             (result.cmd, result.exit_code, result.output))

  os.chmod(constants.SSL_CERT_FILE, 0400)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))
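
# For reference (a sketch, not executed here): the RunCmd call above is
# equivalent to running roughly
#
#   openssl req -new -newkey rsa:1024 -days 1825 -nodes -x509 \
#     -keyout $SSL_CERT_FILE -out $SSL_CERT_FILE -batch
#
# where 365*5 = 1825 days and $SSL_CERT_FILE stands for
# constants.SSL_CERT_FILE. Writing the key and the certificate to the
# same root-only file (mode 0400) keeps the daemon's TLS setup to a
# single well-known path.
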
def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir,
                secondary_ip=None,
                vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  # set up the node daemon's SSL certificate
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup(hostname.name)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1)

  cfg = InitConfig(constants.CONFIG_VERSION,
                   cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)
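
# Hypothetical invocation (all values invented for illustration; in
# practice this is driven by the gnt-cluster init command):
#
#   InitCluster(cluster_name="cluster1.example.com",
#               mac_prefix="aa:00:00",
#               def_bridge="xen-br0",
#               master_netdev="eth0",
#               file_storage_dir="/srv/ganeti/file-storage",
#               vg_name="xenvg",
#               beparams={"memory": 128, "vcpus": 1},
#               hvparams={},
#               enabled_hypervisors=["xen-pvm"],
#               default_hypervisor="xen-pvm")
#
# mac_prefix must match the xx:xx:xx pattern checked above and
# file_storage_dir must be absolute; on success the local host becomes
# the master node of a one-node cluster.
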
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: Configuration version
  @type cluster_config: objects.Cluster
  @param cluster_config: Cluster configuration
  @type master_node_config: objects.Node
  @param master_node_config: Master node configuration
  @type cfg_file: string
  @param cfg_file: Configuration file path

  @rtype: ssconf.SimpleConfigWriter
  @return: Initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg
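
# Shape sketch of the data this writes (field names from the code
# above; concrete values are illustrative and the exact ToDict()
# output is assumed, not verified). The serialized ConfigData is
# roughly:
#
#   {"version": <version>,
#    "cluster": <cluster_config as a dict>,
#    "nodes": {"node1.example.com": <master_node_config as a dict>},
#    "instances": {},
#    "serial_no": 1}
#
# i.e. a single-node cluster with no instances, whose only node is the
# master.
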
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  if not rpc.RpcRunner.call_node_stop_master(master, True):
    logging.warning("Could not disable the master role")
  if not rpc.RpcRunner.call_node_leave_cluster(master):
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node")


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)
  gntpem = utils.ReadFile(constants.SSL_CERT_FILE)
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if re.search(r'^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # copy the certificate to the node and restart the node daemon there:
  # we connect with ssh, write the cert via a here-document and run the
  # init script to start ganeti-noded; note that all the variables
  # below are sanitized at this point, either by being constants or by
  # the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))
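
# The remote command generated above looks like this (paths are
# placeholders; the real ones come from constants.SSL_CERT_FILE and
# constants.NODE_INITD_SCRIPT):
#
#   umask 077 && cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
#   -----BEGIN CERTIFICATE-----
#   ...certificate and key material...
#   -----END CERTIFICATE-----
#   !EOF.
#   /etc/init.d/ganeti restart
#
# The shell runs the restart only if the cat succeeds, and the checks
# above guarantee the PEM body never contains a '!EOF.' line, so the
# here-document cannot terminate early.
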
def MasterFailover():
  """Fail over the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  """
  cfg = ssconf.SimpleConfigWriter()

  new_master = utils.HostInfo().name
  old_master = cfg.GetMasterNode()
  node_list = cfg.GetNodeList()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                 " the master is %s but the other nodes voted"
                                 " for %s. Please resync the configuration of"
                                 " this node." % (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  if not rpc.RpcRunner.call_node_stop_master(old_master, True):
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  cfg.SetMasterNode(new_master)
  cfg.Save()

  # Here we have a phase where no master should be running

  if not rpc.RpcRunner.call_upload_file(cfg.GetNodeList(),
                                        constants.CLUSTER_CONF_FILE):
    logging.error("Could not distribute the new configuration"
                  " to the other nodes, please check.")

  if not rpc.RpcRunner.call_node_start_master(new_master, True):
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode
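
# Flow sketch with invented node names, run on node2 while node1 is
# the recorded master:
#
#   1. GatherMasterVotes() confirms the other nodes also see node1 as
#      master (otherwise we abort with an OpPrereqError above).
#   2. node1 is told to drop the master role.
#   3. The local configuration now names node2 as master and is pushed
#      to all nodes.
#   4. node2 starts the master role; the function returns 0 on success
#      and 1 if the new master could not be started.
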
def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since, bugs aside, we use the same
  source of configuration information for both backend and bootstrap
  and will therefore always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (after possibly removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    if not isinstance(results[node], (tuple, list)) or len(results[node]) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = results[node][2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = list(votes.items())
  # sort first on number of votes then on name, so that None sorts
  # last even if half of the nodes do not respond and the other half
  # all vote for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
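
# Worked example with invented hosts: we are node1 and have already
# been removed from node_list = ["node2", "node3", "node4"]. If node2
# and node3 report node1 as their master while node4's rpc call fails,
# the tally is {"node1": 2, None: 1} and the function returns
# [("node1", 2), (None, 1)]. MasterFailover() above accepts this
# result when the top entry matches the locally recorded master.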