#
#

# Copyright (C) 2006, 2007, 2008 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import sha
import re
import logging
import tempfile

from ganeti import rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import hypervisor


def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  f = open(pub_key, 'r')
  try:
    utils.AddAuthorizedKey(auth_keys, f.read(8192))
  finally:
    f.close()


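# Editor's illustration (not part of the original module): assuming root's
# standard OpenSSH paths are what ssh.GetUserFiles() returns, a successful
# _InitSSHSetup() run is roughly equivalent to this shell sequence, with any
# pre-existing keypair backed up and removed first:
#
#   ssh-keygen -t dsa -f /root/.ssh/id_dsa -q -N ""
#   cat /root/.ssh/id_dsa.pub >> /root/.ssh/authorized_keys

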
def _GenerateSelfSignedSslCert(file_name, validity=(365 * 5)):
  """Generates a self-signed SSL certificate.

  @type file_name: str
  @param file_name: Path to output file
  @type validity: int
  @param validity: Validity for certificate in days

  """
  (fd, tmp_file_name) = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    # Only the path is used below, so close the descriptor returned by
    # mkstemp instead of leaking it
    os.close(fd)

    # Set permissions before writing key
    os.chmod(tmp_file_name, 0600)

    result = utils.RunCmd(["openssl", "req", "-new", "-newkey", "rsa:1024",
                           "-days", str(validity), "-nodes", "-x509",
                           "-keyout", tmp_file_name, "-out", tmp_file_name,
                           "-batch"])
    if result.failed:
      raise errors.OpExecError("Could not generate SSL certificate, command"
                               " %s had exitcode %s and error message %s" %
                               (result.cmd, result.exit_code, result.output))

    # Make read-only
    os.chmod(tmp_file_name, 0400)

    os.rename(tmp_file_name, file_name)
  finally:
    utils.RemoveFile(tmp_file_name)


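# Note (editor's addition): because "-keyout" and "-out" above point at the
# same file, the private key and the certificate end up concatenated in a
# single PEM file, e.g. (path illustrative only):
#
#   _GenerateSelfSignedSslCert("/var/lib/ganeti/server.pem", validity=30)
#
# The chmod-0600-before-write / chmod-0400-after pair keeps the key from
# ever being world-readable, even for an instant.

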
def _InitGanetiServerSetup():
  """Setup the necessary configuration for the initial node daemon.

  This generates the cluster SSL certificate (and, if missing, the
  RAPI certificate) and restarts the node daemon.

  """
  _GenerateSelfSignedSslCert(constants.SSL_CERT_FILE)

  # Don't overwrite existing file
  if not os.path.exists(constants.RAPI_CERT_FILE):
    _GenerateSelfSignedSslCert(constants.RAPI_CERT_FILE)

  result = utils.RunCmd([constants.NODE_INITD_SCRIPT, "restart"])

  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))


def InitCluster(cluster_name, mac_prefix, def_bridge,
                master_netdev, file_storage_dir, candidate_pool_size,
                secondary_ip=None, vg_name=None, beparams=None, hvparams=None,
                enabled_hypervisors=None, default_hypervisor=None):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised")

  hostname = utils.HostInfo()

  if hostname.ip.startswith("127."):
    raise errors.OpPrereqError("This host's IP resolves to the private"
                               " range (%s). Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS))

  if not utils.OwnIpAddress(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host."
                               " Aborting." % hostname.ip)

  clustername = utils.HostInfo(cluster_name)

  if utils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT,
                   timeout=5):
    raise errors.OpPrereqError("Cluster IP already active. Aborting.")

  if secondary_ip:
    if not utils.IsValidIP(secondary_ip):
      raise errors.OpPrereqError("Invalid secondary ip given")
    if (secondary_ip != hostname.ip and
        not utils.OwnIpAddress(secondary_ip)):
      raise errors.OpPrereqError("You gave %s as secondary IP,"
                                 " but it does not belong to this host." %
                                 secondary_ip)
  else:
    secondary_ip = hostname.ip

  if vg_name is not None:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus)

  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory you passed is"
                               " not an absolute path.")

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" %
                                 (file_storage_dir, err))

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()))

  if not (os.path.isfile(constants.NODE_INITD_SCRIPT) and
          os.access(constants.NODE_INITD_SCRIPT, os.X_OK)):
    raise errors.OpPrereqError("Init.d script '%s' missing or not"
                               " executable." % constants.NODE_INITD_SCRIPT)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # generate the SSL certificates and restart the node daemon
  _InitGanetiServerSetup()

  # set up ssh config and /etc/hosts
  f = open(constants.SSH_HOST_RSA_PUB, 'r')
  try:
    sshline = f.read()
  finally:
    f.close()
  sshkey = sshline.split(" ")[1]

  utils.AddHostToEtcHosts(hostname.name)
  _InitSSHSetup()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    default_bridge=def_bridge,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    default_hypervisor=default_hypervisor,
    beparams={constants.BEGR_DEFAULT: beparams},
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    )

  sscfg = InitConfig(constants.CONFIG_VERSION,
                     cluster_config, master_node_config)
  ssh.WriteKnownHostsFile(sscfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg = config.ConfigWriter()
  cfg.Update(cfg.GetClusterInfo())

  # start the master ip
  # TODO: Review rpc call from bootstrap
  rpc.RpcRunner.call_node_start_master(hostname.name, True)


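# Example invocation (editor's sketch, not part of the original source).
# Every value is a placeholder; the beparams/hvparams dicts must satisfy
# constants.BES_PARAMETER_TYPES and constants.HVS_PARAMETER_TYPES, and the
# parameter keys shown are illustrative only:
#
#   InitCluster(cluster_name="cluster.example.com",
#               mac_prefix="aa:00:00",
#               def_bridge="xen-br0",
#               master_netdev="eth0",
#               file_storage_dir="/srv/ganeti/file-storage",
#               candidate_pool_size=10,
#               enabled_hypervisors=["xen-pvm"],
#               default_hypervisor="xen-pvm",
#               beparams={"memory": 128, "vcpus": 1, "auto_balance": True},
#               hvparams={"xen-pvm": {}})  # real hypervisor params elided

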
def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  @rtype: L{ssconf.SimpleConfigWriter}
  @return: initialized config instance

  """
  nodes = {
    master_node_config.name: master_node_config,
    }

  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1)
  cfg = ssconf.SimpleConfigWriter.FromDict(config_data.ToDict(), cfg_file)
  cfg.Save()

  return cfg


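# Usage note (editor's addition): InitCluster() above calls this with the
# default path; the cfg_file argument also makes it easy to write the
# initial configuration somewhere else, e.g. in a test (path hypothetical):
#
#   cfg = InitConfig(constants.CONFIG_VERSION, cluster_config,
#                    master_node_config, cfg_file="/tmp/test-config.data")
#   # cfg is a ssconf.SimpleConfigWriter, already saved to disk by Save()

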
def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  if result.failed or not result.data:
    logging.warning("Could not disable the master role")
  result = rpc.RpcRunner.call_node_leave_cluster(master)
  if result.failed or not result.data:
    logging.warning("Could not shutdown the node daemon and cleanup the node")


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  sshrunner = ssh.SshRunner(cluster_name)

  noded_cert = utils.ReadFile(constants.SSL_CERT_FILE)
  rapi_cert = utils.ReadFile(constants.RAPI_CERT_FILE)

  # in the base64 pem encoding, neither '!' nor '.' are valid chars,
  # so we use this to detect an invalid certificate; as long as the
  # cert doesn't contain this, the here-document will be correctly
  # parsed by the shell sequence below
  if (re.search(r'^!EOF\.', noded_cert, re.MULTILINE) or
      re.search(r'^!EOF\.', rapi_cert, re.MULTILINE)):
    raise errors.OpExecError("invalid PEM encoding in the SSL certificate")

  if not noded_cert.endswith("\n"):
    noded_cert += "\n"
  if not rapi_cert.endswith("\n"):
    rapi_cert += "\n"

  # copy the certificates to the remote node and restart its node daemon;
  # note that all the below variables are sanitized at this point,
  # either by being constants or by the checks above
  mycommand = ("umask 077 && "
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "cat > '%s' << '!EOF.' && \n"
               "%s!EOF.\n"
               "chmod 0400 %s %s && "
               "%s restart" %
               (constants.SSL_CERT_FILE, noded_cert,
                constants.RAPI_CERT_FILE, rapi_cert,
                constants.SSL_CERT_FILE, constants.RAPI_CERT_FILE,
                constants.NODE_INITD_SCRIPT))

  result = sshrunner.Run(node, 'root', mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=False,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))


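# For reference (editor's illustration): with typical values for the
# constants (paths shown are examples only) and the certificate bodies
# elided, mycommand expands to a shell script of this shape:
#
#   umask 077 && cat > '/var/lib/ganeti/server.pem' << '!EOF.' &&
#   <noded cert PEM>!EOF.
#   cat > '/var/lib/ganeti/rapi.pem' << '!EOF.' &&
#   <rapi cert PEM>!EOF.
#   chmod 0400 /var/lib/ganeti/server.pem /var/lib/ganeti/rapi.pem &&
#   /etc/init.d/ganeti restart
#
# which is why the '!EOF.' check above suffices to keep the here-documents
# from being terminated early by the certificate contents.

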
def MasterFailover():
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ('\n'.join(mc_no_master)))

  vote_list = GatherMasterVotes(node_list)

  if vote_list:
    voted_master = vote_list[0][0]
    if voted_master is None:
      raise errors.OpPrereqError("Cluster is inconsistent, most nodes did not"
                                 " respond.")
    elif voted_master != old_master:
      raise errors.OpPrereqError("I have a wrong configuration, I believe the"
                                 " master is %s but the other nodes voted for"
                                 " %s. Please resync the configuration of"
                                 " this node." % (old_master, voted_master))
  # end checks

  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  if result.failed or not result.data:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually", old_master)

  # Here we have a phase where no master should be running

  # instantiate a real config writer, as we now know we have the
  # configuration data
  cfg = config.ConfigWriter()

  cluster_info = cfg.GetClusterInfo()
  cluster_info.master_node = new_master
  # this will also regenerate the ssconf files, since we updated the
  # cluster info
  cfg.Update(cluster_info)

  result = rpc.RpcRunner.call_node_start_master(new_master, True)
  if result.failed or not result.data:
    logging.error("Could not start the master role on the new master"
                  " %s, please check", new_master)
    rcode = 1

  return rcode


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since, bugs aside, we use the same
  source of configuration information for both backend and bootstrap
  and would therefore always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = utils.HostInfo().name
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
    # no nodes left (eventually after removing myself)
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.data
    if nres.failed or not isinstance(data, (tuple, list)) or len(data) < 3:
      # here the rpc layer should have already logged errors
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()
  # sort first on number of votes then on name, since we want None
  # sorted last if half of the nodes do not respond while the other
  # half all vote for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
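

# Worked example (editor's illustration, node names hypothetical): suppose
# the local node has already been removed from node_list and three remote
# nodes are queried; two report "node2.example.com" as master and one RPC
# fails. The vote dict is then {"node2.example.com": 2, None: 1} and the
# function returns:
#
#   [("node2.example.com", 2), (None, 1)]
#
# The (votes, name) sort key with reverse=True puts the highest count first;
# on a tie, None loses to any real name (Python 2 orders None before
# strings), which is exactly the "None sorted last" behaviour the comment
# above relies on.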