Statistics
| Branch: | Tag: | Revision:

root / lib / bootstrap.py @ c4b6c29c

History | View | Annotate | Download (11.9 kB)

1
#
2
#
3

    
4
# Copyright (C) 2006, 2007, 2008 Google Inc.
5
#
6
# This program is free software; you can redistribute it and/or modify
7
# it under the terms of the GNU General Public License as published by
8
# the Free Software Foundation; either version 2 of the License, or
9
# (at your option) any later version.
10
#
11
# This program is distributed in the hope that it will be useful, but
12
# WITHOUT ANY WARRANTY; without even the implied warranty of
13
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14
# General Public License for more details.
15
#
16
# You should have received a copy of the GNU General Public License
17
# along with this program; if not, write to the Free Software
18
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19
# 02110-1301, USA.
20

    
21

    
22
"""Functions to bootstrap a new cluster.
23

24
"""
25

    
26
import os
27
import os.path
28
import sha
29
import re
30
import logging
31

    
32
from ganeti import rpc
33
from ganeti import ssh
34
from ganeti import utils
35
from ganeti import errors
36
from ganeti import config
37
from ganeti import constants
38
from ganeti import ssconf
39

    
40

    
41
def _InitSSHSetup(node):
  """Configure SSH access for the cluster on this node.

  A fresh DSA keypair is generated for root (any pre-existing key
  files are backed up first) and the new public key is appended to
  root's authorized keys file.

  Args:
    node: the name of this host as a fqdn

  """
  privkey_path, pubkey_path, authkeys_path = \
    ssh.GetUserFiles(constants.GANETI_RUNAS)

  # Move any existing key material out of the way before generating
  # the new pair (ssh-keygen refuses to overwrite)
  for keyfile in (privkey_path, pubkey_path):
    if os.path.exists(keyfile):
      utils.CreateBackup(keyfile)
    utils.RemoveFile(keyfile)

  keygen_cmd = ["ssh-keygen", "-t", "dsa",
                "-f", privkey_path,
                "-q", "-N", ""]
  keygen_result = utils.RunCmd(keygen_cmd)
  if keygen_result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             keygen_result.output)

  pub_fd = open(pubkey_path, 'r')
  try:
    utils.AddAuthorizedKey(authkeys_path, pub_fd.read(8192))
  finally:
    pub_fd.close()
71

    
72

    
73
def _InitGanetiServerSetup(ss):
74
  """Setup the necessary configuration for the initial node daemon.
75

76
  This creates the nodepass file containing the shared password for
77
  the cluster and also generates the SSL certificate.
78

79
  Args:
80
    ss: A WritableSimpleStore
81

82
  """
83
  # Create pseudo random password
84
  randpass = sha.new(os.urandom(64)).hexdigest()
85
  # and write it into sstore
86
  ss.SetKey(ss.SS_NODED_PASS, randpass)
87

    
88
  result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
89
                         "-days", str(365*5), "-nodes", "-x509",
90
                         "-keyout", constants.SSL_CERT_FILE,
91
                         "-out", constants.SSL_CERT_FILE, "-batch"])
92
  if result.failed:
93
    raise errors.OpExecError("could not generate server ssl cert, command"
94
                             " %s had exitcode %s and error message %s" %
95
                             (result.cmd, result.exit_code, result.output))
96

    
97
  os.chmod(constants.SSL_CERT_FILE, 0400)
98

    
99
  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])
100

    
101
  if result.failed:
102
    raise errors.OpExecError("Could not start the node daemon, command %s"
103
                             " had exitcode %s and error %s" %
104
                             (result.cmd, result.exit_code, result.output))
105

    
106

    
107
def InitCluster(cluster_name, hypervisor_type, mac_prefix, def_bridge,
108
                master_netdev, file_storage_dir,
109
                secondary_ip=None,
110
                vg_name=None):
111
  """Initialise the cluster.
112

113
  """
114
  if config.ConfigWriter.IsCluster():
115
    raise errors.OpPrereqError("Cluster is already initialised")
116

    
117
  if hypervisor_type == constants.HT_XEN_HVM31:
118
    if not os.path.exists(constants.VNC_PASSWORD_FILE):
119
      raise errors.OpPrereqError("Please prepare the cluster VNC"
120
                                 "password file %s" %
121
                                 constants.VNC_PASSWORD_FILE)
122

    
123
  hostname = utils.HostInfo()
124

    
125
  if hostname.ip.startswith("127."):
126
    raise errors.OpPrereqError("This host's IP resolves to the private"
127
                               " range (%s). Please fix DNS or %s." %
128
                               (hostname.ip, constants.ETC_HOSTS))
129

    
130
  if not utils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT,
131
                       source=constants.LOCALHOST_IP_ADDRESS):
132
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
133
                               " to %s,\nbut this ip address does not"
134
                               " belong to this host."
135
                               " Aborting." % hostname.ip)
136

    
137
  clustername = utils.HostInfo(cluster_name)
138

    
139
  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
140
                   timeout=5):
141
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")
142

    
143
  if secondary_ip:
144
    if not utils.IsValidIP(secondary_ip):
145
      raise errors.OpPrereqError("Invalid secondary ip given")
146
    if (secondary_ip != hostname.ip and
147
        (not utils.TcpPing(secondary_ip, constants.DEFAULT_NODED_PORT,
148
                           source=constants.LOCALHOST_IP_ADDRESS))):
149
      raise errors.OpPrereqError("You gave %s as secondary IP,"
150
                                 " but it does not belong to this host." %
151
                                 secondary_ip)
152

    
153
  if vg_name is not None:
154
    # Check if volume group is valid
155
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
156
                                          constants.MIN_VG_SIZE)
157
    if vgstatus:
158
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
159
                                 " you are not using lvm" % vgstatus)
160

    
161
  file_storage_dir = os.path.normpath(file_storage_dir)
162

    
163
  if not os.path.isabs(file_storage_dir):
164
    raise errors.OpPrereqError("The file storage directory you passed is"
165
                               " not an absolute path.")
166

    
167
  if not os.path.exists(file_storage_dir):
168
    try:
169
      os.makedirs(file_storage_dir, 0750)
170
    except OSError, err:
171
      raise errors.OpPrereqError("Cannot create file storage directory"
172
                                 " '%s': %s" %
173
                                 (file_storage_dir, err))
174

    
175
  if not os.path.isdir(file_storage_dir):
176
    raise errors.OpPrereqError("The file storage directory '%s' is not"
177
                               " a directory." % file_storage_dir)
178

    
179
  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
180
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)
181

    
182
  if hypervisor_type not in constants.HYPER_TYPES:
183
    raise errors.OpPrereqError("Invalid hypervisor type given '%s'" %
184
                               hypervisor_type)
185

    
186
  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
187
  if result.failed:
188
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
189
                               (master_netdev,
190
                                result.output.strip()))
191

    
192
  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
193
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
194
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
195
                               " executable." % constants.NODE_INITD_SCRIPT)
196

    
197
  # set up the simple store
198
  ss = ssconf.WritableSimpleStore()
199
  ss.SetKey(ss.SS_HYPERVISOR, hypervisor_type)
200
  ss.SetKey(ss.SS_MASTER_NODE, hostname.name)
201
  ss.SetKey(ss.SS_MASTER_IP, clustername.ip)
202
  ss.SetKey(ss.SS_MASTER_NETDEV, master_netdev)
203
  ss.SetKey(ss.SS_CLUSTER_NAME, clustername.name)
204
  ss.SetKey(ss.SS_FILE_STORAGE_DIR, file_storage_dir)
205
  ss.SetKey(ss.SS_CONFIG_VERSION, constants.CONFIG_VERSION)
206

    
207
  # set up the inter-node password and certificate
208
  _InitGanetiServerSetup(ss)
209

    
210
  # set up ssh config and /etc/hosts
211
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
212
  try:
213
    sshline = f.read()
214
  finally:
215
    f.close()
216
  sshkey = sshline.split(" ")[1]
217

    
218
  utils.AddHostToEtcHosts(hostname.name)
219
  _InitSSHSetup(hostname.name)
220

    
221
  # init of cluster config file
222
  cfg = config.ConfigWriter()
223
  cfg.InitConfig(hostname.name, hostname.ip, secondary_ip, sshkey,
224
                 mac_prefix, vg_name, def_bridge)
225

    
226
  ssh.WriteKnownHostsFile(cfg, ss, constants.SSH_KNOWN_HOSTS_FILE)
227

    
228
  # start the master ip
229
  # TODO: Review rpc call from bootstrap
230
  rpc.call_node_start_master(hostname.name, True)
231

    
232

    
233
def FinalizeClusterDestroy(master):
  """Run the final steps of a cluster destroy.

  This completes the destroy started in cmdlib.LUDestroyOpcode by
  shutting down the master role and the node daemon on the last
  remaining node; failures are only logged, not raised.

  """
  master_stopped = rpc.call_node_stop_master(master, True)
  if not master_stopped:
    logging.warning("Could not disable the master role")

  node_left = rpc.call_node_leave_cluster(master)
  if not node_left:
    logging.warning("Could not shutdown the node daemon and cleanup the node")
244

    
245

    
246
def SetupNodeDaemon(node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  Args:
    node: fully qualified domain name for the new node
    ssh_key_check: whether to perform a strict host key check and ask
      for confirmation of unknown keys (passed to ssh as both the
      ask_key and strict_host_check settings)

  Returns:
    0 on success

  Raises:
    errors.OpExecError: if the local password or certificate look
      corrupted, or if the remote command fails

  """
  ss = ssconf.SimpleStore()
  sshrunner = ssh.SshRunner(ss)
  gntpass = ss.GetNodeDaemonPassword()
  # the password is embedded into a shell command line below, so it
  # must only contain characters that are safe there
  if not re.match(r'^[a-zA-Z0-9.]{1,64}$', gntpass):
    raise errors.OpExecError("ganeti password corruption detected")
  f = open(constants.SSL_CERT_FILE)
  try:
    gntpem = f.read(8192)
  finally:
    f.close()
  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  # (regex patterns are raw strings so the backslash in \. reaches
  # the regex engine unmangled)
  if re.search(r'^!EOF\.', gntpem, re.MULTILINE):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")
  if not gntpem.endswith("\n"):
    raise errors.OpExecError("PEM must end with newline")

  # set up inter-node password and certificate and restarts the node daemon
  # and then connect with ssh to set password and start ganeti-noded
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "echo '%s' > '%s' && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n%s restart" %
               (gntpass, ss.KeyToFilename(ss.SS_NODED_PASS),
                constants.SSL_CERT_FILE, gntpem,
                constants.NODE_INITD_SCRIPT))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  return 0
298

    
299

    
300
def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  Returns:
    0 on success, 1 if the master role could not be started on the
    new master (earlier failures are only logged)

  Raises:
    errors.OpPrereqError: if this node is already the master

  """
  ss = ssconf.WritableSimpleStore()

  new_master = utils.HostInfo().name
  old_master = ss.GetMasterNode()

  if old_master == new_master:
    # FIX: grammar in the error message ("This commands" -> "This command")
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)
  # end checks

  rcode = 0

  logging.info("setting master to %s, old master: %s", new_master, old_master)

  # best effort: the old master may be unreachable (that can be the
  # very reason for the failover), so only log failures here
  # FIX: the message was missing "not" ("could disable" -> "could not
  # disable"), matching the other failure messages below
  if not rpc.call_node_stop_master(old_master, True):
    logging.error("could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  ss.SetKey(ss.SS_MASTER_NODE, new_master)

  cfg = config.ConfigWriter()

  # make sure all nodes agree on who the master now is
  if not rpc.call_upload_file(cfg.GetNodeList(),
                              ss.KeyToFilename(ss.SS_MASTER_NODE)):
    logging.error("could not distribute the new simple store master file"
                  " to the other nodes, please check.")

  if not rpc.call_node_start_master(new_master, True):
    logging.error("could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode